hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
1c45be92d15874a5e90e8e60efad7107c63df898
43,600
py
Python
arelle/ValidateVersReport.py
theredpea/Arelle
e53097f142a69b2fefc18298a72f1f1b219b973d
[ "Apache-2.0" ]
1
2018-01-04T01:39:04.000Z
2018-01-04T01:39:04.000Z
arelle/ValidateVersReport.py
GuoHuiChen/Arelle
76b3c720e55348fd91b7be091040d2207f85400c
[ "Apache-2.0" ]
null
null
null
arelle/ValidateVersReport.py
GuoHuiChen/Arelle
76b3c720e55348fd91b7be091040d2207f85400c
[ "Apache-2.0" ]
null
null
null
''' Created on Nov 9, 2010 @author: Mark V Systems Limited (c) Copyright 2010 Mark V Systems Limited, All rights reserved. ''' from arelle import ModelVersObject, XbrlConst, ValidateXbrl, ModelDocument from arelle.ModelValue import qname conceptAttributeEventAttributes = { "conceptAttributeDelete": ("fromCustomAttribute",), "conceptAttributeAdd": ("toCustomAttribute",), "conceptAttributeChange": ("fromCustomAttribute","toCustomAttribute"), "conceptAttributeChange": ("fromCustomAttribute","toCustomAttribute"), "attributeDefinitionChange": ("fromCustomAttribute","toCustomAttribute"), } schemaAttributeEventAttributes = { "conceptIDChange": "id", "conceptTypeChange": "type", "conceptSubstitutionGroupChange": "substitutionGroup", "conceptNillableChange": "nillable", "conceptAbstractChange": "abstract", "conceptBlockChange": "block", "conceptDefaultChange": "default", "conceptFixedChange": "fixed", "conceptFinalChange": "final" } class ValidateVersReport(): def __init__(self, testModelXbrl): self.testModelXbrl = testModelXbrl # testcase or controlling validation object def close(self): self.__dict__.clear() # dereference everything def validate(self, modelVersReport): self.modelVersReport = modelVersReport versReport = modelVersReport.modelDocument if not hasattr(versReport, "xmlDocument"): # not parsed return for DTSname in ("fromDTS", "toDTS"): DTSmodelXbrl = getattr(versReport, DTSname) if DTSmodelXbrl is None or DTSmodelXbrl.modelDocument is None: self.modelVersReport.error("vere:invalidDTSIdentifier", _("%(dts)s is missing or not loaded"), modelObject=self, dts=DTSname) else: # validate DTS ValidateXbrl.ValidateXbrl(DTSmodelXbrl).validate(DTSmodelXbrl) if len(DTSmodelXbrl.errors) > 0: self.modelVersReport.error("vere:invalidDTSIdentifier", _("%(dts) has errors: %(error)s"), modelObject=DTSmodelXbrl.modelDocument, dts=DTSname, error=DTSmodelXbrl.errors) # validate linkbases ValidateXbrl.ValidateXbrl(self.modelVersReport).validate(modelVersReport) versReportElt = 
versReport.xmlRootElement # check actions for assignmentRef in versReportElt.iterdescendants(tag="{http://xbrl.org/2010/versioning-base}assignmentRef"): ref = assignmentRef.get("ref") if ref not in versReport.idObjects or \ not isinstance(versReport.idObjects[ref], ModelVersObject.ModelAssignment): self.modelVersReport.error("vere:invalidAssignmentRef", _("AssignmentRef %(assignmentRef)s does not reference an assignment"), modelObject=assignmentRef, assignmentRef=ref) # check namespace renames for NSrename in versReport.namespaceRenameFrom.values(): if NSrename.fromURI not in versReport.fromDTS.namespaceDocs: self.modelVersReport.error("vere:invalidNamespaceMapping", _("NamespaceRename fromURI %(uri)s does not reference a schema in fromDTS"), modelObject=self, uri=NSrename.fromURI) if NSrename.toURI not in versReport.toDTS.namespaceDocs: self.modelVersReport.error("vere:invalidNamespaceMapping", _("NamespaceRename toURI %(uri)s does not reference a schema in toDTS"), modelObject=self, uri=NSrename.toURI) # check role changes for roleChange in versReport.roleChanges.values(): if roleChange.fromURI not in versReport.fromDTS.roleTypes: self.modelVersReport.error("vere:invalidRoleChange", _("RoleChange fromURI %(uri)s does not reference a roleType in fromDTS"), modelObject=self, uri=roleChange.fromURI) if roleChange.toURI not in versReport.toDTS.roleTypes: self.modelVersReport.error("vere:invalidRoleChange", _("RoleChange toURI %(uri)s does not reference a roleType in toDTS"), modelObject=self, uri=roleChange.toURI) # check reportRefs # check actions for reportRef in versReportElt.iterdescendants(tag="{http://xbrl.org/2010/versioning-base}reportRef"): # if existing it must be valid href = reportRef.get("{http://www.w3.org/1999/xlink}href") # TBD if versReport.fromDTS and versReport.toDTS: # check concept changes of concept basic for conceptChange in versReport.conceptUseChanges: fromConceptQn = conceptChange.fromConceptQname toConceptQn = conceptChange.toConceptQname 
if (conceptChange.name != "conceptAdd" and (fromConceptQn is None or fromConceptQn not in versReport.fromDTS.qnameConcepts)): self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s fromConcept %(concept)s does not reference a concept in fromDTS"), modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.fromConceptQname) if (conceptChange.name != "conceptDelete" and (toConceptQn is None or toConceptQn not in versReport.toDTS.qnameConcepts)): self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s toConcept %(concept)s does not reference a concept in toDTS"), modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname) if (conceptChange.name == "conceptAdd" and toConceptQn is not None and conceptChange.isPhysical ^ (qname(versReport.namespaceRenameTo.get(toConceptQn.namespaceURI, toConceptQn.namespaceURI), toConceptQn.localName) not in versReport.fromDTS.qnameConcepts)): self.modelVersReport.error("vercue:inconsistentPhysicalAttribute", _("%(event)s toConcept %(concept)s physical attribute conflicts with presence in fromDTS"), modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname) if (conceptChange.name == "conceptDelete" and toConceptQn is not None and conceptChange.isPhysical ^ (qname(versReport.namespaceRenameFrom.get(fromConceptQn.namespaceURI, fromConceptQn.namespaceURI), fromConceptQn.localName) in versReport.toDTS.qnameConcepts)): self.modelVersReport.error("vercue:inconsistentPhysicalAttribute", _("%(event)s toConcept %(concept)s physical attribute conflicts with presence in toDTS"), modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname) # check concept changes of concept extended equivalentAttributes = {} for conceptChange in versReport.conceptDetailsChanges: fromConcept = conceptChange.fromConcept toConcept = conceptChange.toConcept fromResource = conceptChange.fromResource toResource = 
conceptChange.toResource # fromConcept checks if not conceptChange.name.endswith("Add"): if not fromConcept is not None: self.modelVersReport.error("vercue:invalidConceptReference", _("%(action)s %(event)s fromConcept %(concept)s does not reference a concept in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=conceptChange.fromConceptQname) # tuple check elif _("Child") in conceptChange.name and \ not versReport.fromDTS.qnameConcepts[fromConcept.qname] \ .isTuple: self.modelVersReport.error("vercue:invalidConceptReference", _("%(action)s %(event)s fromConcept %(concept)s must be defined as a tuple"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=conceptChange.fromConceptQname) # resource check elif "Label" in conceptChange.name: if fromResource is None: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s fromResource %(resource)s does not reference a resource in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue) else: relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.conceptLabel) if relationship is not None: if (relationship.qname != XbrlConst.qnLinkLabelArc or relationship.parentQname != XbrlConst.qnLinkLabelLink or fromResource.qname != XbrlConst.qnLinkLabel): self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) else: relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.elementLabel) if relationship is not None: if relationship.qname != XbrlConst.qnGenArc or \ fromResource.qname != 
XbrlConst.qnGenLabel: self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) else: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s fromResource %(resource)s does not have a label relationship to {3} in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue) elif "Reference" in conceptChange.name: if fromResource is None: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s fromResource %(resource)s does not reference a resource in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue) else: relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.conceptReference) if relationship is not None: if relationship.qname != XbrlConst.qnLinkReferenceArc or \ relationship.parentQname != XbrlConst.qnLinkReferenceLink or \ fromResource.qname != XbrlConst.qnLinkReference: self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) else: relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.elementReference) if relationship is not None: if relationship.qname != XbrlConst.qnGenArc or \ fromResource.qname != XbrlConst.qnGenReference: self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", 
_("%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) else: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s fromResource %(resource)s does not have a reference relationship to %(concept)s in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) # toConcept checks if not conceptChange.name.endswith("Delete"): if not toConcept is not None: self.modelVersReport.error("vercue:invalidConceptReference", _("%(action)s %(event)s toConcept %(concept)s does not reference a concept in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=conceptChange.toConceptQname) # tuple check elif "Child" in conceptChange.name and \ not versReport.toDTS.qnameConcepts[toConcept.qname] \ .isTuple: self.modelVersReport.error("vercue:invalidConceptReference", _("%(action)s %(event)s toConcept %(concept)s must be defined as a tuple"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=conceptChange.toConceptQname) # resource check elif "Label" in conceptChange.name: if toResource is None: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s does not reference a resource in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) elif toResource.qname not in (XbrlConst.qnLinkLabel, XbrlConst.qnGenLabel): self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s toResource %(resource)s is not 
a label in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: relationship = toConcept.relationshipToResource(toResource, XbrlConst.conceptLabel) if relationship is not None: if relationship.qname != XbrlConst.qnLinkLabelArc or \ relationship.parentQname != XbrlConst.qnLinkLabelLink or \ toResource.qname != XbrlConst.qnLinkLabel: self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: relationship = toConcept.relationshipToResource(toResource, XbrlConst.elementLabel) if relationship is not None: if relationship.qname != XbrlConst.qnGenArc or \ toResource.qname != XbrlConst.qnGenLabel: self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: self.modelVersReport.error("vercde:invalidConceptResourceIdentifier", _("%(action)s %(event)s toResource %(resource)s does not have a label relationship to %(concept)s in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) elif "Reference" in conceptChange.name: if toResource is None: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s toResource %(resource)s does not reference a resource in toDTS"), modelObject=conceptChange, 
action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue) elif toResource.qname not in (XbrlConst.qnLinkReference, XbrlConst.qnGenReference): self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s toResource %(resource)s is not a reference in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: relationship = toConcept.relationshipToResource(toResource, XbrlConst.conceptReference) if relationship is not None: if relationship.qname != XbrlConst.qnLinkReferenceArc or \ relationship.parentQname != XbrlConst.qnLinkReferenceLink or \ toResource.qname != XbrlConst.qnLinkReference: self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: relationship = toConcept.relationshipToResource(toResource, XbrlConst.elementReference) if relationship is not None: if relationship.qname != XbrlConst.qnGenArc or \ toResource.qname != XbrlConst.qnGenReference: self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: self.modelVersReport.error("vercde:invalidConceptResourceIdentifier", _("%(action)s %(event)s toResource %(resource)s does not have a reference relationship to %(concept)s in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, 
resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) # check concept correspondence if fromConcept is not None and toConcept is not None: if (versReport.toDTSqname(fromConcept.qname) != toConcept.qname and versReport.equivalentConcepts.get(fromConcept.qname) != toConcept.qname and toConcept.qname not in versReport.relatedConcepts.get(fromConcept.qname,[])): self.modelVersReport.error("vercde:invalidConceptCorrespondence", _("%(action)s %(event)s fromConcept %(conceptFrom)s and toConcept %(conceptTo)s must be equivalent or related"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, conceptFrom=conceptChange.fromConceptQname, conceptTo=conceptChange.toConceptQname) # custom attribute events if conceptChange.name.startswith("conceptAttribute") or conceptChange.name == "attributeDefinitionChange": try: for attr in conceptAttributeEventAttributes[conceptChange.name]: customAttributeQname = conceptChange.customAttributeQname(attr) if not customAttributeQname: self.modelVersReport.info("arelle:invalidAttributeChange", _("%(action)s %(event)s %(attr)s $(attrName)s does not have a name"), modelObject=conceptChange, action=conceptChange.actionId, attr=attr, attrName=customAttributeQname) elif customAttributeQname.namespaceURI in (None, XbrlConst.xbrli, XbrlConst.xsd): self.modelVersReport.error("vercde:illegalCustomAttributeEvent", _("%(action)s %(event)s %(attr)s $(attrName)s has an invalid namespace"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, attr=attr, attrName=customAttributeQname) except KeyError: self.modelVersReport.info("arelle:eventNotRecognized", _("%(action)s %(event)s event is not recognized"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name) if conceptChange.name == "attributeDefinitionChange": fromAttr = conceptChange.customAttributeQname("fromCustomAttribute") toAttr = conceptChange.customAttributeQname("toCustomAttribute") 
equivalentAttributes[fromAttr] = toAttr equivalentAttributes[toAttr] = fromAttr # check item concept identifiers if conceptChange.name in ("conceptPeriodTypeChange", "conceptPeriodTypeChange"): for concept in (fromConcept, toConcept): if concept is not None and not concept.isItem: self.modelVersReport.error("vercde:invalidItemConceptIdentifier", _("%(action)s %(event)s concept %(concept)s does not reference an item concept."), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=concept.qname) # check tuple concept identifiers if conceptChange.name in ("tupleContentModelChange", ): for concept in (fromConcept, toConcept): if concept is not None and not concept.isItem: self.modelVersReport.error("vercde:invalidTupleConceptIdentifier", _("%(action)s %(event)s concept %(concept)s does not reference a tuple concept."), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=concept.qname) if conceptChange.name in schemaAttributeEventAttributes: attr = schemaAttributeEventAttributes[conceptChange.name] if (fromConcept is not None and not fromConcept.get(attr) and toConcept is not None and not toConcept.get(attr)): self.modelVersReport.error("vercde:illegalSchemaAttributeChangeEvent", _("%(action)s %(event)s neither concepts have a %(attribute)s attribute: %(fromConcept)s, %(toConcept)s."), modelObject=conceptChange, action=conceptChange.actionId, attribute=attr, event=conceptChange.name, fromConcept=fromConcept.qname, toConcept=toConcept.qname) # check concept changes for equivalent attributes for conceptChange in versReport.conceptDetailsChanges: if conceptChange.name == "conceptAttributeChange": fromAttr = conceptChange.customAttributeQname("fromCustomAttribute") toAttr = conceptChange.customAttributeQname("toCustomAttribute") if (equivalentAttributes.get(fromAttr) != toAttr and (fromAttr.localName != toAttr.localName or (fromAttr.namespaceURI != toAttr.namespaceURI and 
versReport.namespaceRenameFrom.get(fromAttr.namespaceURI, fromAttr.namespaceURI) != toAttr.namespaceURI))): self.modelVersReport.error("vercde:invalidAttributeCorrespondence", _("%(action)s %(event)s has non-equivalent attributes %(fromQname)s and %(toQname)s"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, fromQname=fromAttr, toQname=toAttr) del equivalentAttributes # dereference # check relationship set changes for relSetChange in versReport.relationshipSetChanges: for relationshipSet, name in ((relSetChange.fromRelationshipSet, "fromRelationshipSet"), (relSetChange.toRelationshipSet, "toRelationshipSet")): if relationshipSet is not None: dts = relationshipSet.dts relationshipSetValid = True if relationshipSet.link: if (relationshipSet.link not in dts.qnameConcepts or (dts.qnameConcepts[relationshipSet.link].type is not None and not dts.qnameConcepts[relationshipSet.link].type.isDerivedFrom(XbrlConst.qnXlExtendedType))): self.modelVersReport.error("verrelse:invalidLinkElementReferenceEvent", _("%(event)s %(relSet)s link %(link)s does not reference an element in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, link=relationshipSet.link) relationshipSetValid = False if relationshipSet.arc: if (relationshipSet.arc not in dts.qnameConcepts or (dts.qnameConcepts[relationshipSet.arc].type is not None and not dts.qnameConcepts[relationshipSet.arc].type.isDerivedFrom(XbrlConst.qnXlArcType))): self.modelVersReport.error("verrelse:invalidArcElementReferenceEvent", _("%(event)s %(relSet)s arc %(arc) does not reference an element in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, arc=relationshipSet.arc) relationshipSetValid = False if relationshipSet.linkrole: if not (XbrlConst.isStandardRole(relationshipSet.linkrole) or relationshipSet.linkrole in relationshipSet.dts.roleTypes): self.modelVersReport.error("verrelse:invalidLinkrole", _("%(event)s %(relSet)s linkrole %(linkrole)s does 
not reference an linkrole in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, linkrole=relationshipSet.linkrole) relationshipSetValid = False elif not any(linkrole == relationshipSet.linkrole for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()): self.modelVersReport.error("verrelse:invalidLinkrole", _("%(event)s %(relSet)s linkrole %(linkrole)s is not used in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, linkrole=relationshipSet.linkrole) relationshipSetValid = False if relationshipSet.arcrole: if not (XbrlConst.isStandardArcrole(relationshipSet.arcrole) or relationshipSet.arcrole in relationshipSet.dts.arcroleTypes): self.modelVersReport.error("verrelse:invalidArcrole", _("%(event)s %(relSet)s arcrole %(arcrole)s does not reference an arcrole in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, arcrole=relationshipSet.arcrole) relationshipSetValid = False elif not any(arcrole == relationshipSet.arcrole for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()): self.modelVersReport.error("verrelse:invalidArcrole", _("%(event)s %(relSet)s arcrole %(arcrole)s is not used in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, arcrole=relationshipSet.arcrole) relationshipSetValid = False for relationship in relationshipSet.relationships: # fromConcept checks if relationship.fromConcept is None: self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s %(relSet)s relationship fromConcept %(conceptFrom)s does not reference a concept in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, conceptFrom=relationship.fromName) relationshipSetValid = False if relationship.toName and relationship.toConcept is None: self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s %(relSet)s relationship toConcept %(conceptTo)s does not reference a concept in its DTS"), modelObject=relSetChange, 
event=relSetChange.name, relSet=name, conceptTo=relationship.toName) relationshipSetValid = False if relationshipSetValid: # test that relations exist if relationship.fromRelationship is None: if relationship.toName: self.modelVersReport.error("verrelse:invalidRelationshipReference", _("%(event)s %(relSet)s no relationship found from fromConcept %(conceptFrom)s to toConcept %(conceptTo)s in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, conceptFrom=relationship.fromName, conceptTo=relationship.toName) else: self.modelVersReport.error("verrelse:invalidRelationshipReference", _("%(event)s %(relSet)s no relationship found fromConcept %(conceptFrom)s in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, conceptFrom=relationship.fromName) # check instance aspect changes for iaChange in versReport.instanceAspectChanges: for instAspects in (iaChange.fromAspects, iaChange.toAspects): if instAspects is not None and instAspects.aspects: dimAspectElts = {} for aspect in instAspects.aspects: dts = aspect.modelAspects.dts if (aspect.localName in ("explicitDimension", "typedDimension") and aspect.concept is None): self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s dimension %(dimension)s is not a concept in its DTS"), modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName) elif aspect.localName == "explicitDimension": dimConcept = aspect.concept if not dimConcept.isExplicitDimension: self.modelVersReport.error("verdime:invalidExplicitDimensionIdentifier", _("%(event)s dimension %(dimension)s is not an explicit dimension in its DTS"), modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName) if dimConcept in dimAspectElts: self.modelVersReport.error("verdime:duplicateExplicitDimensionAspect", _("%(event)s dimension %(dimension)s is duplicated in a single explicitDimension element"), modelObject=(aspect, dimAspectElts[dimConcept]), event=iaChange.name, 
dimension=aspect.conceptName) else: dimAspectElts[dimConcept] = aspect elif aspect.localName == "typedDimension": dimConcept = aspect.concept if not dimConcept.isTypedDimension: self.modelVersReport.error("verdime:invalidTypedDimensionIdentifier", _("%(event)s dimension %(dimension)s is not a typed dimension in its DTS"), modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName) if dimConcept in dimAspectElts: self.modelVersReport.error("verdime:duplicateTypedDimensionAspect", _("%(event)s dimension %(dimension)s is duplicated in a single explicitDimension element"), modelObject=(aspect, dimAspectElts[dimConcept]), event=iaChange.name, dimension=aspect.conceptName) else: dimAspectElts[dimConcept] = aspect if aspect.localName in ("explicitDimension", "concepts"): for relatedConcept in aspect.relatedConcepts: conceptMdlObj = relatedConcept.concept if conceptMdlObj is None or not conceptMdlObj.isItem: self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s concept %(concept)s is not an item in its DTS"), modelObject=aspect, event=iaChange.name, concept=relatedConcept.conceptName) if relatedConcept.arcrole is not None: if (not XbrlConst.isStandardArcrole(relatedConcept.arcrole) and relatedConcept.arcrole not in dts.arcroleTypes): self.modelVersReport.error("verdime:invalidURI", _("%(event)s arcrole %(arcrole)s is not defined in its DTS"), modelObject=aspect, event=iaChange.name, arcrole=relatedConcept.arcrole) elif not any(arcrole == relatedConcept.arcrole for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()): self.modelVersReport.error("verdime:invalidURI", _("%(event)s arcrole %(arcrole)s is not used in its DTS"), modelObject=aspect, event=iaChange.name, linkrole=relatedConcept.arcrole) if relatedConcept.linkrole is not None: if (relatedConcept.linkrole != "http://www.xbrl.org/2003/role/link" and relatedConcept.linkrole not in dts.roleTypes): self.modelVersReport.error("verdime:invalidURI", _("%(event)s linkrole 
%(linkrole)s is not defined in its DTS"), modelObject=aspect, event=iaChange.name, linkrole=relatedConcept.linkrole) elif not any(linkrole == relatedConcept.linkrole for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()): self.modelVersReport.error("verdime:invalidURI", _("%(event)s linkrole %(linkrole)s is not used in its DTS"), modelObject=aspect, event=iaChange.name, linkrole=relatedConcept.linkrole) if (relatedConcept.arc is not None and (relatedConcept.arc not in dts.qnameConcepts or (dts.qnameConcepts[relatedConcept.arc].type is not None and not dts.qnameConcepts[relatedConcept.arc].type.isDerivedFrom(XbrlConst.qnXlArcType)))): self.modelVersReport.error("verdime:invalidArcElement", _("%(event)s arc %(arc)s is not defined as an arc in its DTS"), modelObject=aspect, event=iaChange.name, arc=relatedConcept.arc) if (relatedConcept.link is not None and (relatedConcept.link not in dts.qnameConcepts or (dts.qnameConcepts[relatedConcept.link].type is not None and not dts.qnameConcepts[relatedConcept.link].type.isDerivedFrom(XbrlConst.qnXlExtendedType)))): self.modelVersReport.error("verdime:invalidLinkElement", _("%(event)s link %(link)s is not defined in its DTS"), modelObject=aspect, event=iaChange.name, link=relatedConcept.link) self.close()
80.740741
176
0.547546
from arelle import ModelVersObject, XbrlConst, ValidateXbrl, ModelDocument from arelle.ModelValue import qname conceptAttributeEventAttributes = { "conceptAttributeDelete": ("fromCustomAttribute",), "conceptAttributeAdd": ("toCustomAttribute",), "conceptAttributeChange": ("fromCustomAttribute","toCustomAttribute"), "conceptAttributeChange": ("fromCustomAttribute","toCustomAttribute"), "attributeDefinitionChange": ("fromCustomAttribute","toCustomAttribute"), } schemaAttributeEventAttributes = { "conceptIDChange": "id", "conceptTypeChange": "type", "conceptSubstitutionGroupChange": "substitutionGroup", "conceptNillableChange": "nillable", "conceptAbstractChange": "abstract", "conceptBlockChange": "block", "conceptDefaultChange": "default", "conceptFixedChange": "fixed", "conceptFinalChange": "final" } class ValidateVersReport(): def __init__(self, testModelXbrl): self.testModelXbrl = testModelXbrl def close(self): self.__dict__.clear() def validate(self, modelVersReport): self.modelVersReport = modelVersReport versReport = modelVersReport.modelDocument if not hasattr(versReport, "xmlDocument"): return for DTSname in ("fromDTS", "toDTS"): DTSmodelXbrl = getattr(versReport, DTSname) if DTSmodelXbrl is None or DTSmodelXbrl.modelDocument is None: self.modelVersReport.error("vere:invalidDTSIdentifier", _("%(dts)s is missing or not loaded"), modelObject=self, dts=DTSname) else: ValidateXbrl.ValidateXbrl(DTSmodelXbrl).validate(DTSmodelXbrl) if len(DTSmodelXbrl.errors) > 0: self.modelVersReport.error("vere:invalidDTSIdentifier", _("%(dts) has errors: %(error)s"), modelObject=DTSmodelXbrl.modelDocument, dts=DTSname, error=DTSmodelXbrl.errors) ValidateXbrl.ValidateXbrl(self.modelVersReport).validate(modelVersReport) versReportElt = versReport.xmlRootElement for assignmentRef in versReportElt.iterdescendants(tag="{http://xbrl.org/2010/versioning-base}assignmentRef"): ref = assignmentRef.get("ref") if ref not in versReport.idObjects or \ not isinstance(versReport.idObjects[ref], 
ModelVersObject.ModelAssignment): self.modelVersReport.error("vere:invalidAssignmentRef", _("AssignmentRef %(assignmentRef)s does not reference an assignment"), modelObject=assignmentRef, assignmentRef=ref) for NSrename in versReport.namespaceRenameFrom.values(): if NSrename.fromURI not in versReport.fromDTS.namespaceDocs: self.modelVersReport.error("vere:invalidNamespaceMapping", _("NamespaceRename fromURI %(uri)s does not reference a schema in fromDTS"), modelObject=self, uri=NSrename.fromURI) if NSrename.toURI not in versReport.toDTS.namespaceDocs: self.modelVersReport.error("vere:invalidNamespaceMapping", _("NamespaceRename toURI %(uri)s does not reference a schema in toDTS"), modelObject=self, uri=NSrename.toURI) for roleChange in versReport.roleChanges.values(): if roleChange.fromURI not in versReport.fromDTS.roleTypes: self.modelVersReport.error("vere:invalidRoleChange", _("RoleChange fromURI %(uri)s does not reference a roleType in fromDTS"), modelObject=self, uri=roleChange.fromURI) if roleChange.toURI not in versReport.toDTS.roleTypes: self.modelVersReport.error("vere:invalidRoleChange", _("RoleChange toURI %(uri)s does not reference a roleType in toDTS"), modelObject=self, uri=roleChange.toURI) for reportRef in versReportElt.iterdescendants(tag="{http://xbrl.org/2010/versioning-base}reportRef"): href = reportRef.get("{http://www.w3.org/1999/xlink}href") if versReport.fromDTS and versReport.toDTS: for conceptChange in versReport.conceptUseChanges: fromConceptQn = conceptChange.fromConceptQname toConceptQn = conceptChange.toConceptQname if (conceptChange.name != "conceptAdd" and (fromConceptQn is None or fromConceptQn not in versReport.fromDTS.qnameConcepts)): self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s fromConcept %(concept)s does not reference a concept in fromDTS"), modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.fromConceptQname) if (conceptChange.name != "conceptDelete" and (toConceptQn is None 
or toConceptQn not in versReport.toDTS.qnameConcepts)): self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s toConcept %(concept)s does not reference a concept in toDTS"), modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname) if (conceptChange.name == "conceptAdd" and toConceptQn is not None and conceptChange.isPhysical ^ (qname(versReport.namespaceRenameTo.get(toConceptQn.namespaceURI, toConceptQn.namespaceURI), toConceptQn.localName) not in versReport.fromDTS.qnameConcepts)): self.modelVersReport.error("vercue:inconsistentPhysicalAttribute", _("%(event)s toConcept %(concept)s physical attribute conflicts with presence in fromDTS"), modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname) if (conceptChange.name == "conceptDelete" and toConceptQn is not None and conceptChange.isPhysical ^ (qname(versReport.namespaceRenameFrom.get(fromConceptQn.namespaceURI, fromConceptQn.namespaceURI), fromConceptQn.localName) in versReport.toDTS.qnameConcepts)): self.modelVersReport.error("vercue:inconsistentPhysicalAttribute", _("%(event)s toConcept %(concept)s physical attribute conflicts with presence in toDTS"), modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname) equivalentAttributes = {} for conceptChange in versReport.conceptDetailsChanges: fromConcept = conceptChange.fromConcept toConcept = conceptChange.toConcept fromResource = conceptChange.fromResource toResource = conceptChange.toResource if not conceptChange.name.endswith("Add"): if not fromConcept is not None: self.modelVersReport.error("vercue:invalidConceptReference", _("%(action)s %(event)s fromConcept %(concept)s does not reference a concept in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=conceptChange.fromConceptQname) elif _("Child") in conceptChange.name and \ not versReport.fromDTS.qnameConcepts[fromConcept.qname] \ 
.isTuple: self.modelVersReport.error("vercue:invalidConceptReference", _("%(action)s %(event)s fromConcept %(concept)s must be defined as a tuple"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=conceptChange.fromConceptQname) elif "Label" in conceptChange.name: if fromResource is None: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s fromResource %(resource)s does not reference a resource in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue) else: relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.conceptLabel) if relationship is not None: if (relationship.qname != XbrlConst.qnLinkLabelArc or relationship.parentQname != XbrlConst.qnLinkLabelLink or fromResource.qname != XbrlConst.qnLinkLabel): self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) else: relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.elementLabel) if relationship is not None: if relationship.qname != XbrlConst.qnGenArc or \ fromResource.qname != XbrlConst.qnGenLabel: self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) else: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s fromResource %(resource)s does not have a label relationship to 
{3} in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue) elif "Reference" in conceptChange.name: if fromResource is None: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s fromResource %(resource)s does not reference a resource in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue) else: relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.conceptReference) if relationship is not None: if relationship.qname != XbrlConst.qnLinkReferenceArc or \ relationship.parentQname != XbrlConst.qnLinkReferenceLink or \ fromResource.qname != XbrlConst.qnLinkReference: self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) else: relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.elementReference) if relationship is not None: if relationship.qname != XbrlConst.qnGenArc or \ fromResource.qname != XbrlConst.qnGenReference: self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) else: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s fromResource %(resource)s does not have a reference relationship to %(concept)s in fromDTS"), modelObject=conceptChange, action=conceptChange.actionId, 
event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname) if not conceptChange.name.endswith("Delete"): if not toConcept is not None: self.modelVersReport.error("vercue:invalidConceptReference", _("%(action)s %(event)s toConcept %(concept)s does not reference a concept in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=conceptChange.toConceptQname) elif "Child" in conceptChange.name and \ not versReport.toDTS.qnameConcepts[toConcept.qname] \ .isTuple: self.modelVersReport.error("vercue:invalidConceptReference", _("%(action)s %(event)s toConcept %(concept)s must be defined as a tuple"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=conceptChange.toConceptQname) elif "Label" in conceptChange.name: if toResource is None: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s does not reference a resource in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) elif toResource.qname not in (XbrlConst.qnLinkLabel, XbrlConst.qnGenLabel): self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s toResource %(resource)s is not a label in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: relationship = toConcept.relationshipToResource(toResource, XbrlConst.conceptLabel) if relationship is not None: if relationship.qname != XbrlConst.qnLinkLabelArc or \ relationship.parentQname != XbrlConst.qnLinkLabelLink or \ toResource.qname != XbrlConst.qnLinkLabel: self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s in 
toDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: relationship = toConcept.relationshipToResource(toResource, XbrlConst.elementLabel) if relationship is not None: if relationship.qname != XbrlConst.qnGenArc or \ toResource.qname != XbrlConst.qnGenLabel: self.modelVersReport.error("vercde:invalidConceptLabelIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: self.modelVersReport.error("vercde:invalidConceptResourceIdentifier", _("%(action)s %(event)s toResource %(resource)s does not have a label relationship to %(concept)s in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) elif "Reference" in conceptChange.name: if toResource is None: self.modelVersReport.error("vercde:invalidResourceIdentifier", _("%(action)s %(event)s toResource %(resource)s does not reference a resource in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue) elif toResource.qname not in (XbrlConst.qnLinkReference, XbrlConst.qnGenReference): self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s toResource %(resource)s is not a reference in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: relationship = toConcept.relationshipToResource(toResource, XbrlConst.conceptReference) if relationship is not None: if 
relationship.qname != XbrlConst.qnLinkReferenceArc or \ relationship.parentQname != XbrlConst.qnLinkReferenceLink or \ toResource.qname != XbrlConst.qnLinkReference: self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: relationship = toConcept.relationshipToResource(toResource, XbrlConst.elementReference) if relationship is not None: if relationship.qname != XbrlConst.qnGenArc or \ toResource.qname != XbrlConst.qnGenReference: self.modelVersReport.error("vercde:invalidConceptReferenceIdentifier", _("%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) else: self.modelVersReport.error("vercde:invalidConceptResourceIdentifier", _("%(action)s %(event)s toResource %(resource)s does not have a reference relationship to %(concept)s in toDTS"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname) if fromConcept is not None and toConcept is not None: if (versReport.toDTSqname(fromConcept.qname) != toConcept.qname and versReport.equivalentConcepts.get(fromConcept.qname) != toConcept.qname and toConcept.qname not in versReport.relatedConcepts.get(fromConcept.qname,[])): self.modelVersReport.error("vercde:invalidConceptCorrespondence", _("%(action)s %(event)s fromConcept %(conceptFrom)s and toConcept %(conceptTo)s must be equivalent or related"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, 
conceptFrom=conceptChange.fromConceptQname, conceptTo=conceptChange.toConceptQname) if conceptChange.name.startswith("conceptAttribute") or conceptChange.name == "attributeDefinitionChange": try: for attr in conceptAttributeEventAttributes[conceptChange.name]: customAttributeQname = conceptChange.customAttributeQname(attr) if not customAttributeQname: self.modelVersReport.info("arelle:invalidAttributeChange", _("%(action)s %(event)s %(attr)s $(attrName)s does not have a name"), modelObject=conceptChange, action=conceptChange.actionId, attr=attr, attrName=customAttributeQname) elif customAttributeQname.namespaceURI in (None, XbrlConst.xbrli, XbrlConst.xsd): self.modelVersReport.error("vercde:illegalCustomAttributeEvent", _("%(action)s %(event)s %(attr)s $(attrName)s has an invalid namespace"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, attr=attr, attrName=customAttributeQname) except KeyError: self.modelVersReport.info("arelle:eventNotRecognized", _("%(action)s %(event)s event is not recognized"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name) if conceptChange.name == "attributeDefinitionChange": fromAttr = conceptChange.customAttributeQname("fromCustomAttribute") toAttr = conceptChange.customAttributeQname("toCustomAttribute") equivalentAttributes[fromAttr] = toAttr equivalentAttributes[toAttr] = fromAttr if conceptChange.name in ("conceptPeriodTypeChange", "conceptPeriodTypeChange"): for concept in (fromConcept, toConcept): if concept is not None and not concept.isItem: self.modelVersReport.error("vercde:invalidItemConceptIdentifier", _("%(action)s %(event)s concept %(concept)s does not reference an item concept."), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=concept.qname) if conceptChange.name in ("tupleContentModelChange", ): for concept in (fromConcept, toConcept): if concept is not None and not concept.isItem: 
self.modelVersReport.error("vercde:invalidTupleConceptIdentifier", _("%(action)s %(event)s concept %(concept)s does not reference a tuple concept."), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, concept=concept.qname) if conceptChange.name in schemaAttributeEventAttributes: attr = schemaAttributeEventAttributes[conceptChange.name] if (fromConcept is not None and not fromConcept.get(attr) and toConcept is not None and not toConcept.get(attr)): self.modelVersReport.error("vercde:illegalSchemaAttributeChangeEvent", _("%(action)s %(event)s neither concepts have a %(attribute)s attribute: %(fromConcept)s, %(toConcept)s."), modelObject=conceptChange, action=conceptChange.actionId, attribute=attr, event=conceptChange.name, fromConcept=fromConcept.qname, toConcept=toConcept.qname) for conceptChange in versReport.conceptDetailsChanges: if conceptChange.name == "conceptAttributeChange": fromAttr = conceptChange.customAttributeQname("fromCustomAttribute") toAttr = conceptChange.customAttributeQname("toCustomAttribute") if (equivalentAttributes.get(fromAttr) != toAttr and (fromAttr.localName != toAttr.localName or (fromAttr.namespaceURI != toAttr.namespaceURI and versReport.namespaceRenameFrom.get(fromAttr.namespaceURI, fromAttr.namespaceURI) != toAttr.namespaceURI))): self.modelVersReport.error("vercde:invalidAttributeCorrespondence", _("%(action)s %(event)s has non-equivalent attributes %(fromQname)s and %(toQname)s"), modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name, fromQname=fromAttr, toQname=toAttr) del equivalentAttributes for relSetChange in versReport.relationshipSetChanges: for relationshipSet, name in ((relSetChange.fromRelationshipSet, "fromRelationshipSet"), (relSetChange.toRelationshipSet, "toRelationshipSet")): if relationshipSet is not None: dts = relationshipSet.dts relationshipSetValid = True if relationshipSet.link: if (relationshipSet.link not in dts.qnameConcepts or 
(dts.qnameConcepts[relationshipSet.link].type is not None and not dts.qnameConcepts[relationshipSet.link].type.isDerivedFrom(XbrlConst.qnXlExtendedType))): self.modelVersReport.error("verrelse:invalidLinkElementReferenceEvent", _("%(event)s %(relSet)s link %(link)s does not reference an element in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, link=relationshipSet.link) relationshipSetValid = False if relationshipSet.arc: if (relationshipSet.arc not in dts.qnameConcepts or (dts.qnameConcepts[relationshipSet.arc].type is not None and not dts.qnameConcepts[relationshipSet.arc].type.isDerivedFrom(XbrlConst.qnXlArcType))): self.modelVersReport.error("verrelse:invalidArcElementReferenceEvent", _("%(event)s %(relSet)s arc %(arc) does not reference an element in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, arc=relationshipSet.arc) relationshipSetValid = False if relationshipSet.linkrole: if not (XbrlConst.isStandardRole(relationshipSet.linkrole) or relationshipSet.linkrole in relationshipSet.dts.roleTypes): self.modelVersReport.error("verrelse:invalidLinkrole", _("%(event)s %(relSet)s linkrole %(linkrole)s does not reference an linkrole in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, linkrole=relationshipSet.linkrole) relationshipSetValid = False elif not any(linkrole == relationshipSet.linkrole for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()): self.modelVersReport.error("verrelse:invalidLinkrole", _("%(event)s %(relSet)s linkrole %(linkrole)s is not used in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, linkrole=relationshipSet.linkrole) relationshipSetValid = False if relationshipSet.arcrole: if not (XbrlConst.isStandardArcrole(relationshipSet.arcrole) or relationshipSet.arcrole in relationshipSet.dts.arcroleTypes): self.modelVersReport.error("verrelse:invalidArcrole", _("%(event)s %(relSet)s arcrole %(arcrole)s does not reference an 
arcrole in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, arcrole=relationshipSet.arcrole) relationshipSetValid = False elif not any(arcrole == relationshipSet.arcrole for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()): self.modelVersReport.error("verrelse:invalidArcrole", _("%(event)s %(relSet)s arcrole %(arcrole)s is not used in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, arcrole=relationshipSet.arcrole) relationshipSetValid = False for relationship in relationshipSet.relationships: if relationship.fromConcept is None: self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s %(relSet)s relationship fromConcept %(conceptFrom)s does not reference a concept in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, conceptFrom=relationship.fromName) relationshipSetValid = False if relationship.toName and relationship.toConcept is None: self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s %(relSet)s relationship toConcept %(conceptTo)s does not reference a concept in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, conceptTo=relationship.toName) relationshipSetValid = False if relationshipSetValid: if relationship.fromRelationship is None: if relationship.toName: self.modelVersReport.error("verrelse:invalidRelationshipReference", _("%(event)s %(relSet)s no relationship found from fromConcept %(conceptFrom)s to toConcept %(conceptTo)s in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, conceptFrom=relationship.fromName, conceptTo=relationship.toName) else: self.modelVersReport.error("verrelse:invalidRelationshipReference", _("%(event)s %(relSet)s no relationship found fromConcept %(conceptFrom)s in its DTS"), modelObject=relSetChange, event=relSetChange.name, relSet=name, conceptFrom=relationship.fromName) for iaChange in versReport.instanceAspectChanges: for instAspects in 
(iaChange.fromAspects, iaChange.toAspects): if instAspects is not None and instAspects.aspects: dimAspectElts = {} for aspect in instAspects.aspects: dts = aspect.modelAspects.dts if (aspect.localName in ("explicitDimension", "typedDimension") and aspect.concept is None): self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s dimension %(dimension)s is not a concept in its DTS"), modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName) elif aspect.localName == "explicitDimension": dimConcept = aspect.concept if not dimConcept.isExplicitDimension: self.modelVersReport.error("verdime:invalidExplicitDimensionIdentifier", _("%(event)s dimension %(dimension)s is not an explicit dimension in its DTS"), modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName) if dimConcept in dimAspectElts: self.modelVersReport.error("verdime:duplicateExplicitDimensionAspect", _("%(event)s dimension %(dimension)s is duplicated in a single explicitDimension element"), modelObject=(aspect, dimAspectElts[dimConcept]), event=iaChange.name, dimension=aspect.conceptName) else: dimAspectElts[dimConcept] = aspect elif aspect.localName == "typedDimension": dimConcept = aspect.concept if not dimConcept.isTypedDimension: self.modelVersReport.error("verdime:invalidTypedDimensionIdentifier", _("%(event)s dimension %(dimension)s is not a typed dimension in its DTS"), modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName) if dimConcept in dimAspectElts: self.modelVersReport.error("verdime:duplicateTypedDimensionAspect", _("%(event)s dimension %(dimension)s is duplicated in a single explicitDimension element"), modelObject=(aspect, dimAspectElts[dimConcept]), event=iaChange.name, dimension=aspect.conceptName) else: dimAspectElts[dimConcept] = aspect if aspect.localName in ("explicitDimension", "concepts"): for relatedConcept in aspect.relatedConcepts: conceptMdlObj = relatedConcept.concept if conceptMdlObj is None or not conceptMdlObj.isItem: 
self.modelVersReport.error("vercue:invalidConceptReference", _("%(event)s concept %(concept)s is not an item in its DTS"), modelObject=aspect, event=iaChange.name, concept=relatedConcept.conceptName) if relatedConcept.arcrole is not None: if (not XbrlConst.isStandardArcrole(relatedConcept.arcrole) and relatedConcept.arcrole not in dts.arcroleTypes): self.modelVersReport.error("verdime:invalidURI", _("%(event)s arcrole %(arcrole)s is not defined in its DTS"), modelObject=aspect, event=iaChange.name, arcrole=relatedConcept.arcrole) elif not any(arcrole == relatedConcept.arcrole for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()): self.modelVersReport.error("verdime:invalidURI", _("%(event)s arcrole %(arcrole)s is not used in its DTS"), modelObject=aspect, event=iaChange.name, linkrole=relatedConcept.arcrole) if relatedConcept.linkrole is not None: if (relatedConcept.linkrole != "http://www.xbrl.org/2003/role/link" and relatedConcept.linkrole not in dts.roleTypes): self.modelVersReport.error("verdime:invalidURI", _("%(event)s linkrole %(linkrole)s is not defined in its DTS"), modelObject=aspect, event=iaChange.name, linkrole=relatedConcept.linkrole) elif not any(linkrole == relatedConcept.linkrole for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()): self.modelVersReport.error("verdime:invalidURI", _("%(event)s linkrole %(linkrole)s is not used in its DTS"), modelObject=aspect, event=iaChange.name, linkrole=relatedConcept.linkrole) if (relatedConcept.arc is not None and (relatedConcept.arc not in dts.qnameConcepts or (dts.qnameConcepts[relatedConcept.arc].type is not None and not dts.qnameConcepts[relatedConcept.arc].type.isDerivedFrom(XbrlConst.qnXlArcType)))): self.modelVersReport.error("verdime:invalidArcElement", _("%(event)s arc %(arc)s is not defined as an arc in its DTS"), modelObject=aspect, event=iaChange.name, arc=relatedConcept.arc) if (relatedConcept.link is not None and (relatedConcept.link not in dts.qnameConcepts or 
(dts.qnameConcepts[relatedConcept.link].type is not None and not dts.qnameConcepts[relatedConcept.link].type.isDerivedFrom(XbrlConst.qnXlExtendedType)))): self.modelVersReport.error("verdime:invalidLinkElement", _("%(event)s link %(link)s is not defined in its DTS"), modelObject=aspect, event=iaChange.name, link=relatedConcept.link) self.close()
true
true
1c45bee0b72f7290f98a152d2fd4047f74e16502
8,482
py
Python
inbm/dispatcher-agent/dispatcher/fota/fota.py
intel/intel-inb-manageability
cdb17765120857fd41cacb838d6ee6e34e1f5047
[ "Apache-2.0" ]
5
2021-12-13T21:19:31.000Z
2022-01-18T18:29:43.000Z
inbm/dispatcher-agent/dispatcher/fota/fota.py
intel/intel-inb-manageability
cdb17765120857fd41cacb838d6ee6e34e1f5047
[ "Apache-2.0" ]
45
2021-12-30T17:21:09.000Z
2022-03-29T22:47:32.000Z
inbm/dispatcher-agent/dispatcher/fota/fota.py
intel/intel-inb-manageability
cdb17765120857fd41cacb838d6ee6e34e1f5047
[ "Apache-2.0" ]
4
2022-01-26T17:42:54.000Z
2022-03-30T04:48:04.000Z
""" FOTA update tool which is called from the dispatcher during installation Copyright (C) 2017-2022 Intel Corporation SPDX-License-Identifier: Apache-2.0 """ import logging import os import platform from threading import Timer from typing import Any, Optional, Mapping from future.moves.urllib.parse import urlparse from inbm_common_lib.exceptions import UrlSecurityException from inbm_common_lib.utility import canonicalize_uri from inbm_common_lib.constants import REMOTE_SOURCE from .constants import * from .fota_error import FotaError from .manifest import parse_tool_options, parse_guid, parse_hold_reboot_flag from .os_factory import OsFactory, OsType from ..common import dispatcher_state from ..common.result_constants import * from ..constants import UMASK_OTA from ..dispatcher_callbacks import DispatcherCallbacks from ..dispatcher_exception import DispatcherException from ..downloader import download from ..packagemanager.local_repo import DirectoryRepo logger = logging.getLogger(__name__) class FOTA: """AKA FOTA Tool An instance of this class will be called from the dispatcher if the requested type of update is FOTA """ def __init__(self, parsed_manifest: Mapping[str, Optional[Any]], repo_type: str, dispatcher_callbacks: DispatcherCallbacks) -> None: """Base class constructor for variable assignment, to send telemetry info and create a new directory if no repo is present @param parsed_manifest: Parsed parameters from manifest @param repo_type: OTA source location -> local or remote @param dispatcher_callbacks: DispatcherCallbacks instance """ logger.debug(f"parsed_manifest: {parsed_manifest}") self._ota_element = parsed_manifest.get('resource') logger.debug(f"ota_element: {self._ota_element}") self._dispatcher_callbacks = dispatcher_callbacks self._uri: Optional[str] = parsed_manifest['uri'] self._repo_type = repo_type repo_path: Optional[str] """If repo_type=local, then use path and not URI""" if self._repo_type == REMOTE_SOURCE: if not self._uri: raise 
FotaError("missing URI.") else: self._pkg_filename = os.path.basename(urlparse(self._uri).path) repo_path = None else: if self._ota_element is None or 'path' not in self._ota_element: raise FotaError('attempting to use local repo for FOTA but no path specified') self._pkg_filename = os.path.basename(self._ota_element['path']) path = self._ota_element.get('path', None) logger.debug(f"path: {path}") if path is None: repo_path = None else: repo_path = os.path.dirname(path) logger.debug(f"repo_path: {repo_path}") self.__signature = parsed_manifest['signature'] self._hash_algorithm = parsed_manifest['hash_algorithm'] self._username = parsed_manifest['username'] self._password = parsed_manifest['password'] if self._dispatcher_callbacks is None: raise FotaError("dispatcher_callbacks not specified in FOTA constructor") self._dispatcher_callbacks.broker_core.telemetry("Firmware Update Tool launched") if repo_path: logger.debug("Using manifest specified repo path") self._repo = DirectoryRepo(repo_path) else: logger.debug("Using default repo path") self._repo = DirectoryRepo(CACHE) def install(self) -> Result: """checks current platform versions and then issues download and install. 
Performs clean() in failure conditions @return: (Result) containing status code and message """ logger.debug("") return_message: Result = Result() hold_reboot = False try: factory = OsFactory.get_factory( self._verify_os_supported(), self._ota_element, self._dispatcher_callbacks) bios_vendor, platform_product = factory.create_upgrade_checker().check() if self._repo_type.lower() == REMOTE_SOURCE: # need to perform this check here because some FOTA commands don't have a URI -- see constructor # (instead they have a path) if self._uri is None: raise FotaError( "internal error: _uri uninitialized in Fota.install with download requested in manifest") uri = canonicalize_uri(self._uri) download(dispatcher_callbacks=self._dispatcher_callbacks, uri=uri, repo=self._repo, umask=UMASK_OTA, username=self._username, password=self._password) else: logger.debug("Skipping FOTA upgradable check for local repo") if self._ota_element is None: raise FotaError("missing ota_element") tool_options = parse_tool_options(self._ota_element) logger.debug(f"tool_options: {tool_options}") guid = parse_guid(self._ota_element) logger.debug(f"guid: {guid}") hold_reboot = parse_hold_reboot_flag(self._ota_element) logger.debug(f"holdReboot: {hold_reboot}; pkg_filename: {self._pkg_filename}") factory.create_installer(self._repo, FOTA_CONF_PATH, FOTA_CONF_SCHEMA_LOC).\ install(guid=guid, tool_options=tool_options, pkg_filename=self._pkg_filename, signature=self.__signature, hash_algorithm=self._hash_algorithm, bios_vendor=bios_vendor, platform_product=platform_product) def trigger_reboot() -> None: """This method triggers a reboot.""" factory.create_rebooter().reboot() if not hold_reboot: logger.debug("") state = {'restart_reason': "fota"} dispatcher_state.write_dispatcher_state_to_state_file(state) time_to_trigger_reboot = Timer(0.1, trigger_reboot) time_to_trigger_reboot.start() return_message = COMMAND_SUCCESS else: status = 'Reboot on hold after Firmware update...' 
state = {'restart_reason': "pota"} dispatcher_state.write_dispatcher_state_to_state_file(state) logger.debug(status) self._dispatcher_callbacks.broker_core.telemetry(status) except (DispatcherException, FotaError, UrlSecurityException, ValueError, FileNotFoundError) as e: error = 'Firmware Update Aborted: ' + str(e) logger.error(error) self._dispatcher_callbacks.broker_core.telemetry(error) return_message = INSTALL_FAILURE self._repo.delete(self._pkg_filename) # In POTA, mender file needs to be deleted also. if hold_reboot: self._repo.delete_all() finally: if return_message == COMMAND_SUCCESS: status = 'Firmware update in process...' else: status = 'Firmware Update Aborted' dispatcher_state.clear_dispatcher_state() logger.debug('Firmware update status: ' + status) self._dispatcher_callbacks.broker_core.telemetry(status) return return_message @staticmethod def _verify_os_supported(): """checks if the current OS is supported. @return True if OS is supported; otherwise, false. @raise ValueError Unsupported OS """ logger.debug("") os_type = platform.system() logger.debug(f"os_type: {os_type}") if os_type in OsType.__members__: return os_type else: logger.error("Unsupported OS type.") raise ValueError('Unsupported OS type.') def check(self) -> None: """validate the manifest before FOTA""" logger.debug("") factory = OsFactory.get_factory( self._verify_os_supported(), self._ota_element, self._dispatcher_callbacks) factory.create_upgrade_checker().check()
42.838384
113
0.630512
import logging import os import platform from threading import Timer from typing import Any, Optional, Mapping from future.moves.urllib.parse import urlparse from inbm_common_lib.exceptions import UrlSecurityException from inbm_common_lib.utility import canonicalize_uri from inbm_common_lib.constants import REMOTE_SOURCE from .constants import * from .fota_error import FotaError from .manifest import parse_tool_options, parse_guid, parse_hold_reboot_flag from .os_factory import OsFactory, OsType from ..common import dispatcher_state from ..common.result_constants import * from ..constants import UMASK_OTA from ..dispatcher_callbacks import DispatcherCallbacks from ..dispatcher_exception import DispatcherException from ..downloader import download from ..packagemanager.local_repo import DirectoryRepo logger = logging.getLogger(__name__) class FOTA: def __init__(self, parsed_manifest: Mapping[str, Optional[Any]], repo_type: str, dispatcher_callbacks: DispatcherCallbacks) -> None: logger.debug(f"parsed_manifest: {parsed_manifest}") self._ota_element = parsed_manifest.get('resource') logger.debug(f"ota_element: {self._ota_element}") self._dispatcher_callbacks = dispatcher_callbacks self._uri: Optional[str] = parsed_manifest['uri'] self._repo_type = repo_type repo_path: Optional[str] if self._repo_type == REMOTE_SOURCE: if not self._uri: raise FotaError("missing URI.") else: self._pkg_filename = os.path.basename(urlparse(self._uri).path) repo_path = None else: if self._ota_element is None or 'path' not in self._ota_element: raise FotaError('attempting to use local repo for FOTA but no path specified') self._pkg_filename = os.path.basename(self._ota_element['path']) path = self._ota_element.get('path', None) logger.debug(f"path: {path}") if path is None: repo_path = None else: repo_path = os.path.dirname(path) logger.debug(f"repo_path: {repo_path}") self.__signature = parsed_manifest['signature'] self._hash_algorithm = parsed_manifest['hash_algorithm'] self._username = 
parsed_manifest['username'] self._password = parsed_manifest['password'] if self._dispatcher_callbacks is None: raise FotaError("dispatcher_callbacks not specified in FOTA constructor") self._dispatcher_callbacks.broker_core.telemetry("Firmware Update Tool launched") if repo_path: logger.debug("Using manifest specified repo path") self._repo = DirectoryRepo(repo_path) else: logger.debug("Using default repo path") self._repo = DirectoryRepo(CACHE) def install(self) -> Result: logger.debug("") return_message: Result = Result() hold_reboot = False try: factory = OsFactory.get_factory( self._verify_os_supported(), self._ota_element, self._dispatcher_callbacks) bios_vendor, platform_product = factory.create_upgrade_checker().check() if self._repo_type.lower() == REMOTE_SOURCE: # (instead they have a path) if self._uri is None: raise FotaError( "internal error: _uri uninitialized in Fota.install with download requested in manifest") uri = canonicalize_uri(self._uri) download(dispatcher_callbacks=self._dispatcher_callbacks, uri=uri, repo=self._repo, umask=UMASK_OTA, username=self._username, password=self._password) else: logger.debug("Skipping FOTA upgradable check for local repo") if self._ota_element is None: raise FotaError("missing ota_element") tool_options = parse_tool_options(self._ota_element) logger.debug(f"tool_options: {tool_options}") guid = parse_guid(self._ota_element) logger.debug(f"guid: {guid}") hold_reboot = parse_hold_reboot_flag(self._ota_element) logger.debug(f"holdReboot: {hold_reboot}; pkg_filename: {self._pkg_filename}") factory.create_installer(self._repo, FOTA_CONF_PATH, FOTA_CONF_SCHEMA_LOC).\ install(guid=guid, tool_options=tool_options, pkg_filename=self._pkg_filename, signature=self.__signature, hash_algorithm=self._hash_algorithm, bios_vendor=bios_vendor, platform_product=platform_product) def trigger_reboot() -> None: factory.create_rebooter().reboot() if not hold_reboot: logger.debug("") state = {'restart_reason': "fota"} 
dispatcher_state.write_dispatcher_state_to_state_file(state) time_to_trigger_reboot = Timer(0.1, trigger_reboot) time_to_trigger_reboot.start() return_message = COMMAND_SUCCESS else: status = 'Reboot on hold after Firmware update...' state = {'restart_reason': "pota"} dispatcher_state.write_dispatcher_state_to_state_file(state) logger.debug(status) self._dispatcher_callbacks.broker_core.telemetry(status) except (DispatcherException, FotaError, UrlSecurityException, ValueError, FileNotFoundError) as e: error = 'Firmware Update Aborted: ' + str(e) logger.error(error) self._dispatcher_callbacks.broker_core.telemetry(error) return_message = INSTALL_FAILURE self._repo.delete(self._pkg_filename) # In POTA, mender file needs to be deleted also. if hold_reboot: self._repo.delete_all() finally: if return_message == COMMAND_SUCCESS: status = 'Firmware update in process...' else: status = 'Firmware Update Aborted' dispatcher_state.clear_dispatcher_state() logger.debug('Firmware update status: ' + status) self._dispatcher_callbacks.broker_core.telemetry(status) return return_message @staticmethod def _verify_os_supported(): logger.debug("") os_type = platform.system() logger.debug(f"os_type: {os_type}") if os_type in OsType.__members__: return os_type else: logger.error("Unsupported OS type.") raise ValueError('Unsupported OS type.') def check(self) -> None: logger.debug("") factory = OsFactory.get_factory( self._verify_os_supported(), self._ota_element, self._dispatcher_callbacks) factory.create_upgrade_checker().check()
true
true
1c45bf70eca6a992410fb3243e168ae272e4fd35
1,699
py
Python
coding_interviews/elements_of_programming_interview/delete_duplicates_from_a_sorted_array.py
LeandroTk/Algorithms
569ed68eba3eeff902f8078992099c28ce4d7cd6
[ "MIT" ]
205
2018-12-01T17:49:49.000Z
2021-12-22T07:02:27.000Z
coding_interviews/elements_of_programming_interview/delete_duplicates_from_a_sorted_array.py
LeandroTk/Algorithms
569ed68eba3eeff902f8078992099c28ce4d7cd6
[ "MIT" ]
2
2020-01-01T16:34:29.000Z
2020-04-26T19:11:13.000Z
coding_interviews/elements_of_programming_interview/delete_duplicates_from_a_sorted_array.py
LeandroTk/Algorithms
569ed68eba3eeff902f8078992099c28ce4d7cd6
[ "MIT" ]
50
2018-11-28T20:51:36.000Z
2021-11-29T04:08:25.000Z
# input: [2,3,5,5,7,11,11,11,13] # output: [2,3,5,7,11,13,0,0,0] # input: [-2,-2,1] # output: [-2,1,0] # input: [0,0,1,1] # output: [0,1,0,0] ''' result = [] counter = {} loop input is not in the counter counter[number] << True result << number n = len(input) - len(result) loop n result << 0 return result Space: O(2N) = O(N) Runtime: O(N) ''' def delete_duplicates(numbers): if not numbers: return [[], 0] result = [] counter_mapper = {} counter = 0 for number in numbers: if number not in counter_mapper: counter_mapper[number] = True result.append(number) counter += 1 difference_of_lengths = len(numbers) - len(result) for _ in range(difference_of_lengths): result.append(0) return [result, counter] def test(input, expect): print(delete_duplicates(input) == expect) test([2,3,5,5,7,11,11,11,13], [[2,3,5,7,11,13,0,0,0],6]) test([-2,-2,1], [[-2,1,0],2]) test([0,0,1,1], [[0,1,0,0],2]) test([], [[],0]) def delete_duplicates_2(numbers): counter = 1 if not numbers: return [[], 0] for index in range(1, len(numbers)): if numbers[index - 1] != numbers[index]: numbers[counter] = numbers[index] counter += 1 difference_of_lengths = len(numbers) - counter for index in range(difference_of_lengths): numbers[len(numbers) - index - 1] = 0 return [numbers, counter] def test_2(input, expect): print(delete_duplicates(input) == expect) test_2([2,3,5,5,7,11,11,11,13], [[2,3,5,7,11,13,0,0,0],6]) test_2([-2,-2,1], [[-2,1,0],2]) test_2([0,0,1,1], [[0,1,0,0],2]) test_2([], [[],0])
20.22619
58
0.566215
def delete_duplicates(numbers): if not numbers: return [[], 0] result = [] counter_mapper = {} counter = 0 for number in numbers: if number not in counter_mapper: counter_mapper[number] = True result.append(number) counter += 1 difference_of_lengths = len(numbers) - len(result) for _ in range(difference_of_lengths): result.append(0) return [result, counter] def test(input, expect): print(delete_duplicates(input) == expect) test([2,3,5,5,7,11,11,11,13], [[2,3,5,7,11,13,0,0,0],6]) test([-2,-2,1], [[-2,1,0],2]) test([0,0,1,1], [[0,1,0,0],2]) test([], [[],0]) def delete_duplicates_2(numbers): counter = 1 if not numbers: return [[], 0] for index in range(1, len(numbers)): if numbers[index - 1] != numbers[index]: numbers[counter] = numbers[index] counter += 1 difference_of_lengths = len(numbers) - counter for index in range(difference_of_lengths): numbers[len(numbers) - index - 1] = 0 return [numbers, counter] def test_2(input, expect): print(delete_duplicates(input) == expect) test_2([2,3,5,5,7,11,11,11,13], [[2,3,5,7,11,13,0,0,0],6]) test_2([-2,-2,1], [[-2,1,0],2]) test_2([0,0,1,1], [[0,1,0,0],2]) test_2([], [[],0])
true
true
1c45bfbfe06e66c030a706f0763fdf1865d626d3
1,904
py
Python
map/views.py
alzseven/djeju
5aade103dd97999dd7b5f97c461aeccbfb0ea23e
[ "MIT" ]
null
null
null
map/views.py
alzseven/djeju
5aade103dd97999dd7b5f97c461aeccbfb0ea23e
[ "MIT" ]
2
2021-06-04T23:32:09.000Z
2021-06-10T19:39:20.000Z
map/views.py
alzseven/djeju
5aade103dd97999dd7b5f97c461aeccbfb0ea23e
[ "MIT" ]
null
null
null
from django.shortcuts import render from django.template import Context import json import requests from map.models import Hospitals from django.contrib.gis.geos import fromstr from django.contrib.gis.db.models.functions import Distance # Create your views here. def maskmap(request): # djangoReq cur_lat = request.GET.get('lat') cur_lng = request.GET.get('lng') lvl = int(request.GET.get("level")) dis = 0 if(lvl>0 and lvl<5): dis = 125 * 2**(lvl+1) elif(lvl>=5): dis = 5000 # else: # #invalid value # dis = 0 apiReqtxt = "lat="+ str(cur_lat) + "&lng=" + str(cur_lng) + "&m=" + str(dis) url = "https://8oi9s0nnth.apigw.ntruss.com/corona19-masks/v1/storesByGeo/json?" + apiReqtxt result = requests.get(url).text data = Context( {"lat":float(cur_lat), "lng":float(cur_lng), "lvl":int(lvl), "strdata":str(result) }) return render(request, 'map/maskstore.html', {'strdata':data}) def hospmap(request): cur_lat = request.GET.get('lat') cur_lng = request.GET.get('lng') lvl = int(request.GET.get("level")) dis = 0 if(lvl>0 and lvl<5): dis = 250 * 2**(lvl+1) elif(lvl>=5): dis = 10000 #TODO:Set New Max # else: # #invalid value # dis = 0 user_location = fromstr(f'POINT({float(cur_lng)} {float(cur_lat)})', srid=4326) qs = Hospitals.objects.filter(location__distance_lte=(user_location, dis))\ .values('latitude','longtitude','yadmNm','hospTyTpCd','telno','adtFrDd','isReliefhos','isInspect','isTriage') data = Context( {"lat":float(cur_lat), "lng":float(cur_lng), "lvl":int(lvl), "hosdata":json.dumps(list(qs), ensure_ascii=False, default=str) }) #TODO:Filtering at view? return render(request, 'map/hospital.html', {'data':data})
27.594203
117
0.605567
from django.shortcuts import render from django.template import Context import json import requests from map.models import Hospitals from django.contrib.gis.geos import fromstr from django.contrib.gis.db.models.functions import Distance def maskmap(request): cur_lat = request.GET.get('lat') cur_lng = request.GET.get('lng') lvl = int(request.GET.get("level")) dis = 0 if(lvl>0 and lvl<5): dis = 125 * 2**(lvl+1) elif(lvl>=5): dis = 5000 Reqtxt = "lat="+ str(cur_lat) + "&lng=" + str(cur_lng) + "&m=" + str(dis) url = "https://8oi9s0nnth.apigw.ntruss.com/corona19-masks/v1/storesByGeo/json?" + apiReqtxt result = requests.get(url).text data = Context( {"lat":float(cur_lat), "lng":float(cur_lng), "lvl":int(lvl), "strdata":str(result) }) return render(request, 'map/maskstore.html', {'strdata':data}) def hospmap(request): cur_lat = request.GET.get('lat') cur_lng = request.GET.get('lng') lvl = int(request.GET.get("level")) dis = 0 if(lvl>0 and lvl<5): dis = 250 * 2**(lvl+1) elif(lvl>=5): dis = 10000 r_location = fromstr(f'POINT({float(cur_lng)} {float(cur_lat)})', srid=4326) qs = Hospitals.objects.filter(location__distance_lte=(user_location, dis))\ .values('latitude','longtitude','yadmNm','hospTyTpCd','telno','adtFrDd','isReliefhos','isInspect','isTriage') data = Context( {"lat":float(cur_lat), "lng":float(cur_lng), "lvl":int(lvl), "hosdata":json.dumps(list(qs), ensure_ascii=False, default=str) }) return render(request, 'map/hospital.html', {'data':data})
true
true
1c45c0b5f0f1b4ac58ff0d930371bca1e8a86c2c
31,428
py
Python
boto/gs/key.py
dreamhost/boto
57eaacfc66acd7083641ef504857786a12e330ff
[ "MIT" ]
null
null
null
boto/gs/key.py
dreamhost/boto
57eaacfc66acd7083641ef504857786a12e330ff
[ "MIT" ]
null
null
null
boto/gs/key.py
dreamhost/boto
57eaacfc66acd7083641ef504857786a12e330ff
[ "MIT" ]
null
null
null
# Copyright 2010 Google Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import base64 import binascii import os import re import StringIO from boto.exception import BotoClientError from boto.s3.key import Key as S3Key from boto.s3.keyfile import KeyFile class Key(S3Key): """ Represents a key (object) in a GS bucket. :ivar bucket: The parent :class:`boto.gs.bucket.Bucket`. :ivar name: The name of this Key object. :ivar metadata: A dictionary containing user metadata that you wish to store with the object or that has been retrieved from an existing object. :ivar cache_control: The value of the `Cache-Control` HTTP header. :ivar content_type: The value of the `Content-Type` HTTP header. :ivar content_encoding: The value of the `Content-Encoding` HTTP header. :ivar content_disposition: The value of the `Content-Disposition` HTTP header. :ivar content_language: The value of the `Content-Language` HTTP header. :ivar etag: The `etag` associated with this object. 
:ivar last_modified: The string timestamp representing the last time this object was modified in GS. :ivar owner: The ID of the owner of this object. :ivar storage_class: The storage class of the object. Currently, one of: STANDARD | DURABLE_REDUCED_AVAILABILITY. :ivar md5: The MD5 hash of the contents of the object. :ivar size: The size, in bytes, of the object. :ivar generation: The generation number of the object. :ivar meta_generation: The generation number of the object metadata. :ivar encrypted: Whether the object is encrypted while at rest on the server. """ generation = None meta_generation = None def endElement(self, name, value, connection): if name == 'Key': self.name = value elif name == 'ETag': self.etag = value elif name == 'IsLatest': if value == 'true': self.is_latest = True else: self.is_latest = False elif name == 'LastModified': self.last_modified = value elif name == 'Size': self.size = int(value) elif name == 'StorageClass': self.storage_class = value elif name == 'Owner': pass elif name == 'VersionId': self.version_id = value elif name == 'Generation': self.generation = value elif name == 'MetaGeneration': self.meta_generation = value else: setattr(self, name, value) def handle_version_headers(self, resp, force=False): self.meta_generation = resp.getheader('x-goog-metageneration', None) self.generation = resp.getheader('x-goog-generation', None) def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False, version_id=None, override_num_retries=None, response_headers=None): query_args = None if self.generation: query_args = ['generation=%s' % self.generation] self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb, override_num_retries=override_num_retries, response_headers=response_headers, query_args=query_args) def delete(self): return self.bucket.delete_key(self.name, version_id=self.version_id, generation=self.generation) def add_email_grant(self, permission, email_address): """ Convenience method that provides a quick 
way to add an email grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google account to which you are granting the permission. """ acl = self.get_acl() acl.add_email_grant(permission, email_address) self.set_acl(acl) def add_user_grant(self, permission, user_id): """ Convenience method that provides a quick way to add a canonical user grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type user_id: string :param user_id: The canonical user id associated with the GS account to which you are granting the permission. """ acl = self.get_acl() acl.add_user_grant(permission, user_id) self.set_acl(acl) def add_group_email_grant(self, permission, email_address, headers=None): """ Convenience method that provides a quick way to add an email group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. 
:type email_address: string :param email_address: The email address associated with the Google Group to which you are granting the permission. """ acl = self.get_acl(headers=headers) acl.add_group_email_grant(permission, email_address) self.set_acl(acl, headers=headers) def add_group_grant(self, permission, group_id): """ Convenience method that provides a quick way to add a canonical group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type group_id: string :param group_id: The canonical group id associated with the Google Groups account you are granting the permission to. """ acl = self.get_acl() acl.add_group_grant(permission, group_id) self.set_acl(acl) def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, res_upload_handler=None, size=None, rewind=False, if_generation=None): """ Store an object in GS using the name of the Key object as the key in GS and the contents of the file pointed to by 'fp' as the contents. :type fp: file :param fp: the file whose contents are to be uploaded :type headers: dict :param headers: additional HTTP headers to be sent with the PUT request. :type replace: bool :param replace: If this parameter is False, the method will first check to see if an object exists in the bucket with the same key. If it does, it won't overwrite it. The default value is True which will overwrite the object. :type cb: function :param cb: a callback function that will be called to report progress on the upload. 
The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the total number of bytes that need to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter, this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type res_upload_handler: ResumableUploadHandler :param res_upload_handler: If provided, this handler will perform the upload. :type size: int :param size: (optional) The Maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where you are splitting the file up into different ranges to be uploaded. If not specified, the default behaviour is to read all bytes from the file pointer. Less bytes may be available. Notes: 1. The "size" parameter currently cannot be used when a resumable upload handler is given but is still useful for uploading part of a file as implemented by the parent class. 2. At present Google Cloud Storage does not support multipart uploads. :type rewind: bool :param rewind: (optional) If True, the file pointer (fp) will be rewound to the start before any bytes are read from it. 
The default behaviour is False which reads from the current position of the file pointer (fp). :type if_generation: int :param if_generation: (optional) If set to a generation number, the object will only be written to if its current generation number is this value. If set to the value 0, the object will only be written if it doesn't already exist. :rtype: int :return: The number of bytes written to the key. TODO: At some point we should refactor the Bucket and Key classes, to move functionality common to all providers into a parent class, and provider-specific functionality into subclasses (rather than just overriding/sharing code the way it currently works). """ provider = self.bucket.connection.provider if res_upload_handler and size: # could use size instead of file_length if provided but... raise BotoClientError('"size" param not supported for resumable uploads.') headers = headers or {} if policy: headers[provider.acl_header] = policy if rewind: # caller requests reading from beginning of fp. fp.seek(0, os.SEEK_SET) else: # The following seek/tell/seek logic is intended # to detect applications using the older interface to # set_contents_from_file(), which automatically rewound the # file each time the Key was reused. This changed with commit # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads # split into multiple parts and uploaded in parallel, and at # the time of that commit this check was added because otherwise # older programs would get a success status and upload an empty # object. Unfortuantely, it's very inefficient for fp's implemented # by KeyFile (used, for example, by gsutil when copying between # providers). So, we skip the check for the KeyFile case. # TODO: At some point consider removing this seek/tell/seek # logic, after enough time has passed that it's unlikely any # programs remain that assume the older auto-rewind interface. 
if not isinstance(fp, KeyFile): spos = fp.tell() fp.seek(0, os.SEEK_END) if fp.tell() == spos: fp.seek(0, os.SEEK_SET) if fp.tell() != spos: # Raise an exception as this is likely a programming # error whereby there is data before the fp but nothing # after it. fp.seek(spos) raise AttributeError('fp is at EOF. Use rewind option ' 'or seek() to data start.') # seek back to the correct position. fp.seek(spos) if hasattr(fp, 'name'): self.path = fp.name if self.bucket != None: if isinstance(fp, KeyFile): # Avoid EOF seek for KeyFile case as it's very inefficient. key = fp.getkey() size = key.size - fp.tell() self.size = size # At present both GCS and S3 use MD5 for the etag for # non-multipart-uploaded objects. If the etag is 32 hex # chars use it as an MD5, to avoid having to read the file # twice while transferring. if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)): etag = key.etag.strip('"') md5 = (etag, base64.b64encode(binascii.unhexlify(etag))) if size: self.size = size else: # If md5 is provided, still need to size so # calculate based on bytes to end of content spos = fp.tell() fp.seek(0, os.SEEK_END) self.size = fp.tell() - spos fp.seek(spos) size = self.size if md5 == None: md5 = self.compute_md5(fp, size) self.md5 = md5[0] self.base64md5 = md5[1] if self.name == None: self.name = self.md5 if not replace: if self.bucket.lookup(self.name): return if if_generation is not None: headers['x-goog-if-generation-match'] = str(if_generation) if res_upload_handler: res_upload_handler.send_file(self, fp, headers, cb, num_cb) else: # Not a resumable transfer so use basic send_file mechanism. self.send_file(fp, headers, cb, num_cb, size=size) def set_contents_from_filename(self, filename, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, reduced_redundancy=None, res_upload_handler=None, if_generation=None): """ Store an object in GS using the name of the Key object as the key in GS and the contents of the file named by 'filename'. 
See set_contents_from_file method for details about the parameters. :type filename: string :param filename: The name of the file that you want to put onto GS :type headers: dict :param headers: Additional headers to pass along with the request to GS. :type replace: bool :param replace: If True, replaces the contents of the file if it already exists. :type cb: function :param cb: (optional) a callback function that will be called to report progress on the download. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted from GS and the second representing the total number of bytes that need to be transmitted. :type cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type res_upload_handler: ResumableUploadHandler :param res_upload_handler: If provided, this handler will perform the upload. :type if_generation: int :param if_generation: (optional) If set to a generation number, the object will only be written to if its current generation number is this value. If set to the value 0, the object will only be written if it doesn't already exist. """ # Clear out any previously computed md5 hashes, since we are setting the content. 
self.md5 = None self.base64md5 = None fp = open(filename, 'rb') self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy, md5, res_upload_handler, if_generation=if_generation) fp.close() def set_contents_from_string(self, s, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, if_generation=None): """ Store an object in S3 using the name of the Key object as the key in S3 and the string 's' as the contents. See set_contents_from_file method for details about the parameters. :type headers: dict :param headers: Additional headers to pass along with the request to AWS. :type replace: bool :param replace: If True, replaces the contents of the file if it already exists. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to S3 and the second representing the size of the to be transmitted object. :type cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.s3.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in S3. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. 
:type if_generation: int :param if_generation: (optional) If set to a generation number, the object will only be written to if its current generation number is this value. If set to the value 0, the object will only be written if it doesn't already exist. """ # Clear out any previously computed md5 hashes, since we are setting the content. self.md5 = None self.base64md5 = None if isinstance(s, unicode): s = s.encode("utf-8") fp = StringIO.StringIO(s) r = self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy, md5, if_generation=if_generation) fp.close() return r def set_contents_from_stream(self, *args, **kwargs): """ Store an object using the name of the Key object as the key in cloud and the contents of the data stream pointed to by 'fp' as the contents. The stream object is not seekable and total size is not known. This has the implication that we can't specify the Content-Size and Content-MD5 in the header. So for huge uploads, the delay in calculating MD5 is avoided but with a penalty of inability to verify the integrity of the uploaded data. :type fp: file :param fp: the file whose contents are to be uploaded :type headers: dict :param headers: additional HTTP headers to be sent with the PUT request. :type replace: bool :param replace: If this parameter is False, the method will first check to see if an object exists in the bucket with the same key. If it does, it won't overwrite it. The default value is True which will overwrite the object. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the total number of bytes that need to be transmitted. 
:type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter, this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type reduced_redundancy: bool :param reduced_redundancy: If True, this will set the storage class of the new Key to be REDUCED_REDUNDANCY. The Reduced Redundancy Storage (RRS) feature of S3, provides lower redundancy at lower storage cost. :type size: int :param size: (optional) The Maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where you are splitting the file up into different ranges to be uploaded. If not specified, the default behaviour is to read all bytes from the file pointer. Less bytes may be available. :type if_generation: int :param if_generation: (optional) If set to a generation number, the object will only be written to if its current generation number is this value. If set to the value 0, the object will only be written if it doesn't already exist. """ if_generation = kwargs.pop('if_generation', None) if if_generation is not None: headers = kwargs.get('headers', {}) headers['x-goog-if-generation-match'] = str(if_generation) kwargs['headers'] = headers return super(Key, self).set_contents_from_stream(*args, **kwargs) def set_acl(self, acl_or_str, headers=None, generation=None, if_generation=None, if_metageneration=None): """Sets the ACL for this object. :type acl_or_str: string or :class:`boto.gs.acl.ACL` :param acl_or_str: A canned ACL string (see :data:`~.gs.acl.CannedACLStrings`) or an ACL object. :type headers: dict :param headers: Additional headers to set during the request. :type generation: int :param generation: If specified, sets the ACL for a specific generation of a versioned object. 
If not specified, the current version is modified. :type if_generation: int :param if_generation: (optional) If set to a generation number, the acl will only be updated if its current generation number is this value. :type if_metageneration: int :param if_metageneration: (optional) If set to a metageneration number, the acl will only be updated if its current metageneration number is this value. """ if self.bucket != None: self.bucket.set_acl(acl_or_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration) def get_acl(self, headers=None, generation=None): """Returns the ACL of this object. :param dict headers: Additional headers to set during the request. :param int generation: If specified, gets the ACL for a specific generation of a versioned object. If not specified, the current version is returned. :rtype: :class:`.gs.acl.ACL` """ if self.bucket != None: return self.bucket.get_acl(self.name, headers=headers, generation=generation) def get_xml_acl(self, headers=None, generation=None): """Returns the ACL string of this object. :param dict headers: Additional headers to set during the request. :param int generation: If specified, gets the ACL for a specific generation of a versioned object. If not specified, the current version is returned. :rtype: str """ if self.bucket != None: return self.bucket.get_xml_acl(self.name, headers=headers, generation=generation) def set_xml_acl(self, acl_str, headers=None, generation=None, if_generation=None, if_metageneration=None): """Sets this objects's ACL to an XML string. :type acl_str: string :param acl_str: A string containing the ACL XML. :type headers: dict :param headers: Additional headers to set during the request. :type generation: int :param generation: If specified, sets the ACL for a specific generation of a versioned object. If not specified, the current version is modified. 
:type if_generation: int :param if_generation: (optional) If set to a generation number, the acl will only be updated if its current generation number is this value. :type if_metageneration: int :param if_metageneration: (optional) If set to a metageneration number, the acl will only be updated if its current metageneration number is this value. """ if self.bucket != None: return self.bucket.set_xml_acl(acl_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration) def set_canned_acl(self, acl_str, headers=None, generation=None, if_generation=None, if_metageneration=None): """Sets this objects's ACL using a predefined (canned) value. :type acl_str: string :param acl_str: A canned ACL string. See :data:`~.gs.acl.CannedACLStrings`. :type headers: dict :param headers: Additional headers to set during the request. :type generation: int :param generation: If specified, sets the ACL for a specific generation of a versioned object. If not specified, the current version is modified. :type if_generation: int :param if_generation: (optional) If set to a generation number, the acl will only be updated if its current generation number is this value. :type if_metageneration: int :param if_metageneration: (optional) If set to a metageneration number, the acl will only be updated if its current metageneration number is this value. """ if self.bucket != None: return self.bucket.set_canned_acl( acl_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration )
45.220144
91
0.616329
import base64 import binascii import os import re import StringIO from boto.exception import BotoClientError from boto.s3.key import Key as S3Key from boto.s3.keyfile import KeyFile class Key(S3Key): generation = None meta_generation = None def endElement(self, name, value, connection): if name == 'Key': self.name = value elif name == 'ETag': self.etag = value elif name == 'IsLatest': if value == 'true': self.is_latest = True else: self.is_latest = False elif name == 'LastModified': self.last_modified = value elif name == 'Size': self.size = int(value) elif name == 'StorageClass': self.storage_class = value elif name == 'Owner': pass elif name == 'VersionId': self.version_id = value elif name == 'Generation': self.generation = value elif name == 'MetaGeneration': self.meta_generation = value else: setattr(self, name, value) def handle_version_headers(self, resp, force=False): self.meta_generation = resp.getheader('x-goog-metageneration', None) self.generation = resp.getheader('x-goog-generation', None) def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False, version_id=None, override_num_retries=None, response_headers=None): query_args = None if self.generation: query_args = ['generation=%s' % self.generation] self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb, override_num_retries=override_num_retries, response_headers=response_headers, query_args=query_args) def delete(self): return self.bucket.delete_key(self.name, version_id=self.version_id, generation=self.generation) def add_email_grant(self, permission, email_address): acl = self.get_acl() acl.add_email_grant(permission, email_address) self.set_acl(acl) def add_user_grant(self, permission, user_id): acl = self.get_acl() acl.add_user_grant(permission, user_id) self.set_acl(acl) def add_group_email_grant(self, permission, email_address, headers=None): acl = self.get_acl(headers=headers) acl.add_group_email_grant(permission, email_address) self.set_acl(acl, headers=headers) def 
add_group_grant(self, permission, group_id): acl = self.get_acl() acl.add_group_grant(permission, group_id) self.set_acl(acl) def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, res_upload_handler=None, size=None, rewind=False, if_generation=None): provider = self.bucket.connection.provider if res_upload_handler and size: raise BotoClientError('"size" param not supported for resumable uploads.') headers = headers or {} if policy: headers[provider.acl_header] = policy if rewind: fp.seek(0, os.SEEK_SET) else: # programs remain that assume the older auto-rewind interface. if not isinstance(fp, KeyFile): spos = fp.tell() fp.seek(0, os.SEEK_END) if fp.tell() == spos: fp.seek(0, os.SEEK_SET) if fp.tell() != spos: # Raise an exception as this is likely a programming # error whereby there is data before the fp but nothing # after it. fp.seek(spos) raise AttributeError('fp is at EOF. Use rewind option ' 'or seek() to data start.') # seek back to the correct position. fp.seek(spos) if hasattr(fp, 'name'): self.path = fp.name if self.bucket != None: if isinstance(fp, KeyFile): # Avoid EOF seek for KeyFile case as it's very inefficient. 
key = fp.getkey() size = key.size - fp.tell() self.size = size if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)): etag = key.etag.strip('"') md5 = (etag, base64.b64encode(binascii.unhexlify(etag))) if size: self.size = size else: # If md5 is provided, still need to size so # calculate based on bytes to end of content spos = fp.tell() fp.seek(0, os.SEEK_END) self.size = fp.tell() - spos fp.seek(spos) size = self.size if md5 == None: md5 = self.compute_md5(fp, size) self.md5 = md5[0] self.base64md5 = md5[1] if self.name == None: self.name = self.md5 if not replace: if self.bucket.lookup(self.name): return if if_generation is not None: headers['x-goog-if-generation-match'] = str(if_generation) if res_upload_handler: res_upload_handler.send_file(self, fp, headers, cb, num_cb) else: # Not a resumable transfer so use basic send_file mechanism. self.send_file(fp, headers, cb, num_cb, size=size) def set_contents_from_filename(self, filename, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, reduced_redundancy=None, res_upload_handler=None, if_generation=None): # Clear out any previously computed md5 hashes, since we are setting the content. self.md5 = None self.base64md5 = None fp = open(filename, 'rb') self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy, md5, res_upload_handler, if_generation=if_generation) fp.close() def set_contents_from_string(self, s, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, if_generation=None): # Clear out any previously computed md5 hashes, since we are setting the content. 
self.md5 = None self.base64md5 = None if isinstance(s, unicode): s = s.encode("utf-8") fp = StringIO.StringIO(s) r = self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy, md5, if_generation=if_generation) fp.close() return r def set_contents_from_stream(self, *args, **kwargs): if_generation = kwargs.pop('if_generation', None) if if_generation is not None: headers = kwargs.get('headers', {}) headers['x-goog-if-generation-match'] = str(if_generation) kwargs['headers'] = headers return super(Key, self).set_contents_from_stream(*args, **kwargs) def set_acl(self, acl_or_str, headers=None, generation=None, if_generation=None, if_metageneration=None): if self.bucket != None: self.bucket.set_acl(acl_or_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration) def get_acl(self, headers=None, generation=None): if self.bucket != None: return self.bucket.get_acl(self.name, headers=headers, generation=generation) def get_xml_acl(self, headers=None, generation=None): if self.bucket != None: return self.bucket.get_xml_acl(self.name, headers=headers, generation=generation) def set_xml_acl(self, acl_str, headers=None, generation=None, if_generation=None, if_metageneration=None): if self.bucket != None: return self.bucket.set_xml_acl(acl_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration) def set_canned_acl(self, acl_str, headers=None, generation=None, if_generation=None, if_metageneration=None): if self.bucket != None: return self.bucket.set_canned_acl( acl_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration )
true
true
1c45c0eac73d31615a106f4522042ae688360bab
2,494
py
Python
forum/models.py
boxed/forum
abb3699d310bf3a404f031a3cb0e4bdbf403da5a
[ "BSD-3-Clause" ]
2
2019-06-28T16:30:44.000Z
2020-12-28T01:46:52.000Z
forum/models.py
boxed/forum
abb3699d310bf3a404f031a3cb0e4bdbf403da5a
[ "BSD-3-Clause" ]
14
2019-02-26T17:25:54.000Z
2019-04-03T18:11:24.000Z
forum/models.py
boxed/forum
abb3699d310bf3a404f031a3cb0e4bdbf403da5a
[ "BSD-3-Clause" ]
1
2019-06-14T14:21:47.000Z
2019-06-14T14:21:47.000Z
from hashlib import md5 from django.contrib.auth.models import User from django.core import validators from django.db import models from iommi import register_factory from unread.models import UnreadModel class Model(models.Model): def __repr__(self): return f'{type(self)} {self.pk}:{self}' class Meta: abstract = True def get_unread_identifier(self): return f'wiki/context/{self._meta.verbose_name}:{self.pk}' class Room(Model): name = models.CharField(max_length=255) description = models.TextField(blank=True) custom_data = models.CharField(max_length=1024, db_index=True, null=True, blank=True) def __str__(self): return self.name def get_absolute_url(self): return f'/rooms/{self.pk}/' def get_unread_id(self): return f'forum/room:{self.pk}' class BinaryField(models.Field): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.validators.append(validators.MaxLengthValidator(self.max_length * 2)) def db_type(self, connection): assert connection.settings_dict['ENGINE'] == 'django.db.backends.mysql', 'VARBINARY is mysql only' return f'varbinary({str(self.max_length)})' register_factory(BinaryField, factory=None) class Message(UnreadModel): room = models.ForeignKey(Room, on_delete=models.PROTECT, related_name='messages') text = models.TextField(blank=True, null=True) parent = models.ForeignKey('self', on_delete=models.PROTECT, null=True, blank=True, related_name='replies') path = BinaryField(max_length=1000, db_index=True, null=True) visible = models.BooleanField(default=True) user = models.ForeignKey(User, on_delete=models.PROTECT, related_name='messages') has_replies = models.BooleanField(default=False) custom_data = models.CharField(max_length=1024, db_index=True, null=True, blank=True) def __repr__(self): return f'<Message: {self.pk}>' def get_absolute_url(self): return f'/rooms/{self.room.pk}/message/{self.pk}/' class Meta: ordering = ('path',) @property def indent(self): return (len(self.path) // 8) - 1 @property def indent_rem(self): return 
self.indent * 2 + 3 @property def gravatar_url(self): return f'https://www.gravatar.com/avatar/{md5(self.user.email.encode()).hexdigest()}?d=identicon' def bytes_from_int(i): return i.to_bytes(length=64 // 8, byteorder='big')
29
111
0.690056
from hashlib import md5 from django.contrib.auth.models import User from django.core import validators from django.db import models from iommi import register_factory from unread.models import UnreadModel class Model(models.Model): def __repr__(self): return f'{type(self)} {self.pk}:{self}' class Meta: abstract = True def get_unread_identifier(self): return f'wiki/context/{self._meta.verbose_name}:{self.pk}' class Room(Model): name = models.CharField(max_length=255) description = models.TextField(blank=True) custom_data = models.CharField(max_length=1024, db_index=True, null=True, blank=True) def __str__(self): return self.name def get_absolute_url(self): return f'/rooms/{self.pk}/' def get_unread_id(self): return f'forum/room:{self.pk}' class BinaryField(models.Field): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.validators.append(validators.MaxLengthValidator(self.max_length * 2)) def db_type(self, connection): assert connection.settings_dict['ENGINE'] == 'django.db.backends.mysql', 'VARBINARY is mysql only' return f'varbinary({str(self.max_length)})' register_factory(BinaryField, factory=None) class Message(UnreadModel): room = models.ForeignKey(Room, on_delete=models.PROTECT, related_name='messages') text = models.TextField(blank=True, null=True) parent = models.ForeignKey('self', on_delete=models.PROTECT, null=True, blank=True, related_name='replies') path = BinaryField(max_length=1000, db_index=True, null=True) visible = models.BooleanField(default=True) user = models.ForeignKey(User, on_delete=models.PROTECT, related_name='messages') has_replies = models.BooleanField(default=False) custom_data = models.CharField(max_length=1024, db_index=True, null=True, blank=True) def __repr__(self): return f'<Message: {self.pk}>' def get_absolute_url(self): return f'/rooms/{self.room.pk}/message/{self.pk}/' class Meta: ordering = ('path',) @property def indent(self): return (len(self.path) // 8) - 1 @property def indent_rem(self): return 
self.indent * 2 + 3 @property def gravatar_url(self): return f'https://www.gravatar.com/avatar/{md5(self.user.email.encode()).hexdigest()}?d=identicon' def bytes_from_int(i): return i.to_bytes(length=64 // 8, byteorder='big')
true
true
1c45c15f9e69201656c5a6fd742639e0189553ed
15,419
py
Python
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_keys_operations.py
mohamedshabanofficial/azure-sdk-for-python
81c585f310cd2ec23d2ad145173958914a075a58
[ "MIT" ]
2
2019-08-23T21:14:00.000Z
2021-09-07T18:32:34.000Z
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_keys_operations.py
mohamedshabanofficial/azure-sdk-for-python
81c585f310cd2ec23d2ad145173958914a075a58
[ "MIT" ]
2
2021-11-03T06:10:36.000Z
2021-12-01T06:29:39.000Z
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_keys_operations.py
mohamedshabanofficial/azure-sdk-for-python
81c585f310cd2ec23d2ad145173958914a075a58
[ "MIT" ]
1
2021-05-19T02:55:10.000Z
2021-05-19T02:55:10.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class KeysOperations: """KeysOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.synapse.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list_by_workspace( self, resource_group_name: str, workspace_name: str, **kwargs ) -> AsyncIterable["_models.KeyInfoListResult"]: """Returns a list of keys in a workspace. 
:param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either KeyInfoListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.KeyInfoListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyInfoListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_workspace.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = 
self._deserialize('KeyInfoListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize(_models.ErrorContract, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys'} # type: ignore async def get( self, resource_group_name: str, workspace_name: str, key_name: str, **kwargs ) -> "_models.Key": """Gets a workspace key. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param key_name: The name of the workspace key. 
:type key_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Key, or the result of cls(response) :rtype: ~azure.mgmt.synapse.models.Key :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Key"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'), 'keyName': self._serialize.url("key_name", key_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorContract, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Key', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = 
{'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'} # type: ignore async def create_or_update( self, resource_group_name: str, workspace_name: str, key_name: str, key_properties: "_models.Key", **kwargs ) -> "_models.Key": """Creates or updates a workspace key. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param key_name: The name of the workspace key. :type key_name: str :param key_properties: Key put request properties. :type key_properties: ~azure.mgmt.synapse.models.Key :keyword callable cls: A custom type or function that will be passed the direct response :return: Key, or the result of cls(response) :rtype: ~azure.mgmt.synapse.models.Key :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Key"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'), 'keyName': self._serialize.url("key_name", key_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = 
self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(key_properties, 'Key') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorContract, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Key', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'} # type: ignore async def delete( self, resource_group_name: str, workspace_name: str, key_name: str, **kwargs ) -> Optional["_models.Key"]: """Deletes a workspace key. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param key_name: The name of the workspace key. 
:type key_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Key, or the result of cls(response) :rtype: ~azure.mgmt.synapse.models.Key or None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Key"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" # Construct URL url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'), 'keyName': self._serialize.url("key_name", key_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorContract, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('Key', pipeline_response) if cls: 
return cls(pipeline_response, deserialized, {}) return deserialized delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'} # type: ignore
48.640379
195
0.660938
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class KeysOperations: models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list_by_workspace( self, resource_group_name: str, workspace_name: str, **kwargs ) -> AsyncIterable["_models.KeyInfoListResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: url = self.list_by_workspace.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, 
query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('KeyInfoListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize(_models.ErrorContract, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys'} async def get( self, resource_group_name: str, workspace_name: str, key_name: str, **kwargs ) -> "_models.Key": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" url = self.get.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'), 'keyName': self._serialize.url("key_name", key_name, 'str'), } url = self._client.format_url(url, 
**path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorContract, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Key', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'} async def create_or_update( self, resource_group_name: str, workspace_name: str, key_name: str, key_properties: "_models.Key", **kwargs ) -> "_models.Key": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" url = self.create_or_update.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'), 'keyName': self._serialize.url("key_name", key_name, 'str'), } url = self._client.format_url(url, 
**path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} body_content = self._serialize.body(key_properties, 'Key') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorContract, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Key', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'} async def delete( self, resource_group_name: str, workspace_name: str, key_name: str, **kwargs ) -> Optional["_models.Key"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" url = self.delete.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': 
self._serialize.url("workspace_name", workspace_name, 'str'), 'keyName': self._serialize.url("key_name", key_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorContract, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('Key', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'}
true
true
1c45c21bce32039850b2b214ced69db7934f7418
5,134
py
Python
samples/demo.py
siyuan-song/Container
42313132af32f2edf710643b9ceb8ca84693ba5c
[ "MIT" ]
2
2020-07-17T02:24:00.000Z
2020-07-17T21:14:45.000Z
samples/demo.py
siyuan-song/Container
42313132af32f2edf710643b9ceb8ca84693ba5c
[ "MIT" ]
null
null
null
samples/demo.py
siyuan-song/Container
42313132af32f2edf710643b9ceb8ca84693ba5c
[ "MIT" ]
null
null
null
# coding: utf-8 # # Mask R-CNN Demo # # A quick intro to using the pre-trained model to detect and segment objects. # In[1]: import os import sys import random import math import numpy as np import skimage.io import matplotlib import matplotlib.pyplot as plt # Root directory of the project ROOT_DIR = os.path.abspath("../") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library from mrcnn import utils import mrcnn.model as modellib from mrcnn import visualize # Import COCO config sys.path.append(os.path.join(ROOT_DIR, "samples/container/")) # To find local version import container #get_ipython().run_line_magic('matplotlib', 'inline') # Directory to save logs and trained model MODEL_DIR = os.path.join(ROOT_DIR, "logs") # Local path to trained weights file COCO_MODEL_PATH = os.path.join(ROOT_DIR, "logs/container20200717T1153/mask_rcnn_container_0030.h5") # Download COCO trained weights from Releases if needed if not os.path.exists(COCO_MODEL_PATH): utils.download_trained_weights(COCO_MODEL_PATH) # Directory of images to run detection on IMAGE_DIR = os.path.join(ROOT_DIR, "samples/container/dataset/val") # ## Configurations # # We'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```. # # For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change. # In[2]: class InferenceConfig(container.ContainerConfig): # Set batch size to 1 since we'll be running inference on # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 config = InferenceConfig() config.display() # ## Create Model and Load Trained Weights # In[3]: # Create model object in inference mode. 
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config) # Load weights trained on MS-COCO model.load_weights(COCO_MODEL_PATH, by_name=True) # ## Class Names # # The model classifies objects and returns class IDs, which are integer value that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71. # # To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns it's own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names. # # To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this. # ``` # # Load COCO dataset # dataset = coco.CocoDataset() # dataset.load_coco(COCO_DIR, "train") # dataset.prepare() # # # Print class names # print(dataset.class_names) # ``` # # We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of the class name in the list represent its ID (first class is 0, second is 1, third is 2, ...etc.) # In[4]: # COCO Class names # Index of the class in the list is its ID. 
For example, to get ID of # the teddy bear class, use: class_names.index('teddy bear') class_names = ['BG','Cola Bottle','Fanta Bottle','Cherry Coke Bottle','Coke Zero Bottle','Mtn Dew Bottle','Cola Can','Fanta Can'] # ## Run Object Detection # In[5]: # Load a random image from the images folder ##file_names = next(os.walk(IMAGE_DIR))[2] ##image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names))) test_image = skimage.io.imread(os.path.join(IMAGE_DIR,'Image0170.png')) test_image = image[:,:,:3] # Run detection results = model.detect([test_image], verbose=1) # Visualize results r = results[0] visualize.display_instances(test_image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']) # Evaluation # Compute VOC-Style mAP @ IoU=0.5 # Running on 40 images. Increase for better accuracy. from container import ContainerDataset dataset_val = ContainerDataset() dataset_val.load_container(os.path.join(ROOT_DIR, "samples/container/dataset"), "val") dataset_val.prepare() image_ids = np.random.choice(dataset_val.image_ids, 40) APs = [] for image_id in image_ids: # Load image and ground truth data image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, config, image_id, use_mini_mask=False) image = image[:,:,:3] # Run object detection results = model.detect([image], verbose=0) r = results[0] # Compute AP AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"], r['masks']) APs.append(AP) print("mAP: ", np.mean(APs))
34.456376
421
0.730619
import sys import random import math import numpy as np import skimage.io import matplotlib import matplotlib.pyplot as plt ROOT_DIR = os.path.abspath("../") sys.path.append(ROOT_DIR) from mrcnn import utils import mrcnn.model as modellib from mrcnn import visualize sys.path.append(os.path.join(ROOT_DIR, "samples/container/")) import container MODEL_DIR = os.path.join(ROOT_DIR, "logs") COCO_MODEL_PATH = os.path.join(ROOT_DIR, "logs/container20200717T1153/mask_rcnn_container_0030.h5") if not os.path.exists(COCO_MODEL_PATH): utils.download_trained_weights(COCO_MODEL_PATH) IMAGE_DIR = os.path.join(ROOT_DIR, "samples/container/dataset/val") he configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change. # In[2]: class InferenceConfig(container.ContainerConfig): # Set batch size to 1 since we'll be running inference on GPU_COUNT = 1 IMAGES_PER_GPU = 1 config = InferenceConfig() config.display() onfig) model.load_weights(COCO_MODEL_PATH, by_name=True) ncy, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns it's own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names. # ``` # # Load COCO dataset # dataset = coco.CocoDataset() # dataset.load_coco(COCO_DIR, "train") # dataset.prepare() # # # Print class names # print(dataset.class_names) # ``` # # We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of the class name in the list represent its ID (first class is 0, second is 1, third is 2, ...etc.) # In[4]: # COCO Class names # Index of the class in the list is its ID. 
For example, to get ID of # the teddy bear class, use: class_names.index('teddy bear') class_names = ['BG','Cola Bottle','Fanta Bottle','Cherry Coke Bottle','Coke Zero Bottle','Mtn Dew Bottle','Cola Can','Fanta Can'] # ## Run Object Detection # In[5]: # Load a random image from the images folder ##file_names = next(os.walk(IMAGE_DIR))[2] ##image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names))) test_image = skimage.io.imread(os.path.join(IMAGE_DIR,'Image0170.png')) test_image = image[:,:,:3] # Run detection results = model.detect([test_image], verbose=1) # Visualize results r = results[0] visualize.display_instances(test_image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']) # Evaluation # Compute VOC-Style mAP @ IoU=0.5 # Running on 40 images. Increase for better accuracy. from container import ContainerDataset dataset_val = ContainerDataset() dataset_val.load_container(os.path.join(ROOT_DIR, "samples/container/dataset"), "val") dataset_val.prepare() image_ids = np.random.choice(dataset_val.image_ids, 40) APs = [] for image_id in image_ids: # Load image and ground truth data image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, config, image_id, use_mini_mask=False) image = image[:,:,:3] # Run object detection results = model.detect([image], verbose=0) r = results[0] # Compute AP AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"], r['masks']) APs.append(AP) print("mAP: ", np.mean(APs))
true
true
1c45c26bdf9dbb63739a39e2d750920b7e4c23b2
1,886
py
Python
terrascript/heroku/r.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
507
2017-07-26T02:58:38.000Z
2022-01-21T12:35:13.000Z
terrascript/heroku/r.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
135
2017-07-20T12:01:59.000Z
2021-10-04T22:25:40.000Z
terrascript/heroku/r.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
81
2018-02-20T17:55:28.000Z
2022-01-31T07:08:40.000Z
# terrascript/heroku/r.py # Automatically generated by tools/makecode.py () import warnings warnings.warn( "using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2 ) import terrascript class heroku_account_feature(terrascript.Resource): pass class heroku_addon(terrascript.Resource): pass class heroku_addon_attachment(terrascript.Resource): pass class heroku_app(terrascript.Resource): pass class heroku_app_config_association(terrascript.Resource): pass class heroku_app_feature(terrascript.Resource): pass class heroku_app_release(terrascript.Resource): pass class heroku_app_webhook(terrascript.Resource): pass class heroku_build(terrascript.Resource): pass class heroku_cert(terrascript.Resource): pass class heroku_collaborator(terrascript.Resource): pass class heroku_config(terrascript.Resource): pass class heroku_domain(terrascript.Resource): pass class heroku_drain(terrascript.Resource): pass class heroku_formation(terrascript.Resource): pass class heroku_pipeline(terrascript.Resource): pass class heroku_pipeline_config_var(terrascript.Resource): pass class heroku_pipeline_coupling(terrascript.Resource): pass class heroku_review_app_config(terrascript.Resource): pass class heroku_slug(terrascript.Resource): pass class heroku_space(terrascript.Resource): pass class heroku_space_app_access(terrascript.Resource): pass class heroku_space_inbound_ruleset(terrascript.Resource): pass class heroku_space_peering_connection_accepter(terrascript.Resource): pass class heroku_space_vpn_connection(terrascript.Resource): pass class heroku_ssl(terrascript.Resource): pass class heroku_team_collaborator(terrascript.Resource): pass class heroku_team_member(terrascript.Resource): pass
15.459016
79
0.782078
import warnings warnings.warn( "using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2 ) import terrascript class heroku_account_feature(terrascript.Resource): pass class heroku_addon(terrascript.Resource): pass class heroku_addon_attachment(terrascript.Resource): pass class heroku_app(terrascript.Resource): pass class heroku_app_config_association(terrascript.Resource): pass class heroku_app_feature(terrascript.Resource): pass class heroku_app_release(terrascript.Resource): pass class heroku_app_webhook(terrascript.Resource): pass class heroku_build(terrascript.Resource): pass class heroku_cert(terrascript.Resource): pass class heroku_collaborator(terrascript.Resource): pass class heroku_config(terrascript.Resource): pass class heroku_domain(terrascript.Resource): pass class heroku_drain(terrascript.Resource): pass class heroku_formation(terrascript.Resource): pass class heroku_pipeline(terrascript.Resource): pass class heroku_pipeline_config_var(terrascript.Resource): pass class heroku_pipeline_coupling(terrascript.Resource): pass class heroku_review_app_config(terrascript.Resource): pass class heroku_slug(terrascript.Resource): pass class heroku_space(terrascript.Resource): pass class heroku_space_app_access(terrascript.Resource): pass class heroku_space_inbound_ruleset(terrascript.Resource): pass class heroku_space_peering_connection_accepter(terrascript.Resource): pass class heroku_space_vpn_connection(terrascript.Resource): pass class heroku_ssl(terrascript.Resource): pass class heroku_team_collaborator(terrascript.Resource): pass class heroku_team_member(terrascript.Resource): pass
true
true
1c45c374300575c38d0712283bbc628b33dfa7e8
20,243
py
Python
save/tokyo202112_MemGCRN_c1to1_20220208115005_time/traintest_MemGCRN.py
deepkashiwa20/TrafficAccident
c5fb26106137a4e85e5b5aa1e8ffdbb672a61988
[ "MIT" ]
null
null
null
save/tokyo202112_MemGCRN_c1to1_20220208115005_time/traintest_MemGCRN.py
deepkashiwa20/TrafficAccident
c5fb26106137a4e85e5b5aa1e8ffdbb672a61988
[ "MIT" ]
null
null
null
save/tokyo202112_MemGCRN_c1to1_20220208115005_time/traintest_MemGCRN.py
deepkashiwa20/TrafficAccident
c5fb26106137a4e85e5b5aa1e8ffdbb672a61988
[ "MIT" ]
null
null
null
import sys import os import shutil import math import numpy as np import pandas as pd import scipy.sparse as ss from sklearn.preprocessing import StandardScaler, MinMaxScaler from datetime import datetime import time import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F from torchsummary import summary import argparse from configparser import ConfigParser import logging import Metrics from MemGCRN import MemGCRN from Utils import * def refineXSYS(XS, YS): assert opt.time or opt.history, 'it should have one covariate time or history' XCov, YCov = XS[..., -1:], YS[..., -1:] XS, YS = XS[:, :, :, :opt.channelin], YS[:, :, :, :opt.channelout] return XS, YS, XCov, YCov def print_params(model): # print trainable params param_count = 0 logger.info('Trainable parameter list:') for name, param in model.named_parameters(): if param.requires_grad: print(name, param.shape, param.numel()) param_count += param.numel() logger.info(f'\n In total: {param_count} trainable parameters. 
\n') return def getModel(mode): model = MemGCRN(num_nodes=num_variable, input_dim=opt.channelin, output_dim=opt.channelout, horizon=opt.seq_len, rnn_units=opt.hiddenunits, num_layers=opt.num_layers, mem_num=opt.mem_num, mem_dim=opt.mem_dim, decoder_type=opt.decoder, go_type=opt.go).to(device) if mode == 'train': summary(model, [(opt.his_len, num_variable, opt.channelin), (opt.seq_len, num_variable, opt.channelout)], device=device) print_params(model) for p in model.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) else: nn.init.uniform_(p) return model def evaluateModel(model, data_iter, ycov_flag): if opt.loss == 'MSE': criterion = nn.MSELoss() if opt.loss == 'MAE': criterion = nn.L1Loss() separate_loss = nn.TripletMarginLoss(margin=1.0) compact_loss = nn.MSELoss() model.eval() loss_sum, n, YS_pred = 0.0, 0, [] loss_sum1, loss_sum2, loss_sum3 = 0.0, 0.0, 0.0 with torch.no_grad(): if ycov_flag: for x, y, y_cov in data_iter: y_pred, h_att, query, pos, neg = model(x, y_cov) loss1 = criterion(y_pred, y) loss2 = separate_loss(query, pos.detach(), neg.detach()) loss3 = compact_loss(query, pos.detach()) loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3 loss_sum += loss.item() * y.shape[0] loss_sum1 += loss1.item() * y.shape[0] loss_sum2 += loss2.item() * y.shape[0] loss_sum3 += loss3.item() * y.shape[0] n += y.shape[0] YS_pred.append(y_pred.cpu().numpy()) else: for x, y in data_iter: y_pred, h_att, query, pos, neg = model(x) loss1 = criterion(y_pred, y) loss2 = separate_loss(query, pos.detach(), neg.detach()) loss3 = compact_loss(query, pos.detach()) loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3 loss_sum += loss.item() * y.shape[0] loss_sum1 += loss1.item() * y.shape[0] loss_sum2 += loss2.item() * y.shape[0] loss_sum3 += loss3.item() * y.shape[0] n += y.shape[0] YS_pred.append(y_pred.cpu().numpy()) loss = loss_sum / n loss1 = loss_sum1 / n loss2 = loss_sum2 / n loss3 = loss_sum3 / n YS_pred = np.vstack(YS_pred) return loss, loss1, loss2, loss3, YS_pred def 
trainModel(name, mode, XS, YS, YCov): logger.info('Model Training Started ...', time.ctime()) logger.info('TIMESTEP_IN, TIMESTEP_OUT', opt.his_len, opt.seq_len) model = getModel(mode) XS_torch, YS_torch = torch.Tensor(XS).to(device), torch.Tensor(YS).to(device) logger.info('XS_torch.shape: ', XS_torch.shape) logger.info('YS_torch.shape: ', YS_torch.shape) if YCov is not None: YCov_torch = torch.Tensor(YCov).to(device) logger.info('YCov_torch.shape: ', YCov_torch.shape) trainval_data = torch.utils.data.TensorDataset(XS_torch, YS_torch, YCov_torch) else: trainval_data = torch.utils.data.TensorDataset(XS_torch, YS_torch) trainval_size = len(trainval_data) train_size = int(trainval_size * (1 - opt.val_ratio)) train_data = torch.utils.data.Subset(trainval_data, list(range(0, train_size))) val_data = torch.utils.data.Subset(trainval_data, list(range(train_size, trainval_size))) train_iter = torch.utils.data.DataLoader(train_data, opt.batch_size, shuffle=False) # drop_last=True val_iter = torch.utils.data.DataLoader(val_data, opt.batch_size, shuffle=False) # drop_last=True trainval_iter = torch.utils.data.DataLoader(trainval_data, opt.batch_size, shuffle=False) # drop_last=True optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr) if opt.loss == 'MSE': criterion = nn.MSELoss() if opt.loss == 'MAE': criterion = nn.L1Loss() separate_loss = nn.TripletMarginLoss(margin=1.0) compact_loss = nn.MSELoss() min_val_loss = np.inf wait = 0 for epoch in range(opt.epoch): starttime = datetime.now() loss_sum, n = 0.0, 0 loss_sum1, loss_sum2, loss_sum3 = 0.0, 0.0, 0.0 model.train() if YCov is not None: for x, y, ycov in train_iter: optimizer.zero_grad() y_pred, h_att, query, pos, neg = model(x, ycov) loss1 = criterion(y_pred, y) loss2 = separate_loss(query, pos.detach(), neg.detach()) loss3 = compact_loss(query, pos.detach()) loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3 loss.backward() optimizer.step() loss_sum += loss.item() * y.shape[0] loss_sum1 += loss1.item() * 
y.shape[0] loss_sum2 += loss2.item() * y.shape[0] loss_sum3 += loss3.item() * y.shape[0] n += y.shape[0] else: for x, y in train_iter: optimizer.zero_grad() y_pred, h_att, query, pos, neg = model(x) loss1 = criterion(y_pred, y) loss2 = separate_loss(query, pos.detach(), neg.detach()) loss3 = compact_loss(query, pos.detach()) loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3 loss.backward() optimizer.step() loss_sum += loss.item() * y.shape[0] loss_sum1 += loss1.item() * y.shape[0] loss_sum2 += loss2.item() * y.shape[0] loss_sum3 += loss3.item() * y.shape[0] n += y.shape[0] train_loss = loss_sum / n train_loss1 = loss_sum1 / n train_loss2 = loss_sum2 / n train_loss3 = loss_sum3 / n val_loss, val_loss1, val_loss2, val_loss3, _ = evaluateModel(model, val_iter, YCov is not None) if val_loss < min_val_loss: wait = 0 min_val_loss = val_loss torch.save(model.state_dict(), modelpt_path) else: wait += 1 if wait == opt.patience: logger.info('Early stopping at epoch: %d' % epoch) break endtime = datetime.now() epoch_time = (endtime - starttime).seconds logger.info("epoch", epoch, "time used:", epoch_time," seconds ", "train loss:", train_loss, train_loss1, train_loss2, train_loss3, "validation loss:", val_loss, val_loss1, val_loss2, val_loss3) with open(epochlog_path, 'a') as f: f.write("%s, %d, %s, %d, %s, %s, %.6f, %s, %.6f\n" % ("epoch", epoch, "time used", epoch_time, "seconds", "train loss", train_loss, "validation loss:", val_loss)) # torch_score = train_loss loss, loss1, loss2, loss3, YS_pred = evaluateModel(model, trainval_iter, YCov is not None) logger.info('trainval loss, loss1, loss2, loss3', loss, loss1, loss2, loss3) logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape) YS = YS[:YS_pred.shape[0], ...] 
YS, YS_pred = np.squeeze(YS), np.squeeze(YS_pred) YS, YS_pred = YS.reshape(-1, YS.shape[-1]), YS_pred.reshape(-1, YS_pred.shape[-1]) YS, YS_pred = scaler.inverse_transform(YS), scaler.inverse_transform(YS_pred) YS, YS_pred = YS.reshape(-1, opt.seq_len, YS.shape[-1]), YS_pred.reshape(-1, opt.seq_len, YS_pred.shape[-1]) logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape) MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS, YS_pred) logger.info('*' * 40) logger.info("%s, %s, Torch MSE, %.6f, %.6f, %.6f, %.6f" % (name, mode, train_loss, train_loss1, train_loss2, train_loss3)) logger.info("%s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f" % (name, mode, MSE, RMSE, MAE, MAPE)) logger.info('Model Training Ended ...', time.ctime()) def testModel(name, mode, XS, YS, YCov, Mask=None): def testScore(YS, YS_pred, message): MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS, YS_pred) logger.info(message) logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape) logger.info("%s, %s, Torch MSE, %.6f, %.6f, %.6f, %.6f" % (name, mode, loss, loss1, loss2, loss3)) logger.info("all pred steps, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f" % (name, mode, MSE, RMSE, MAE, MAPE)) with open(score_path, 'a') as f: f.write("all pred steps, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\n" % (name, mode, MSE, RMSE, MAE, MAPE)) for i in range(opt.seq_len): MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS[..., i], YS_pred[..., i]) logger.info("%d step, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f" % (i+1, name, mode, MSE, RMSE, MAE, MAPE)) f.write("%d step, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\n" % (i+1, name, mode, MSE, RMSE, MAE, MAPE)) return None logger.info('Model Testing Started ...', time.ctime()) logger.info('TIMESTEP_IN, TIMESTEP_OUT', opt.his_len, opt.seq_len) model = getModel(mode) model.load_state_dict(torch.load(modelpt_path)) XS_torch, YS_torch = torch.Tensor(XS).to(device), torch.Tensor(YS).to(device) if YCov is not None: YCov_torch = 
torch.Tensor(YCov).to(device) test_data = torch.utils.data.TensorDataset(XS_torch, YS_torch, YCov_torch) else: test_data = torch.utils.data.TensorDataset(XS_torch, YS_torch) test_iter = torch.utils.data.DataLoader(test_data, opt.batch_size, shuffle=False) # drop_last=True loss, loss1, loss2, loss3, YS_pred = evaluateModel(model, test_iter, YCov is not None) logger.info('test loss, loss1, loss2, loss3', loss, loss1, loss2, loss3) logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape) YS = YS[:YS_pred.shape[0], ...] YS, YS_pred = np.squeeze(YS), np.squeeze(YS_pred) YS, YS_pred = YS.reshape(-1, YS.shape[-1]), YS_pred.reshape(-1, YS_pred.shape[-1]) YS, YS_pred = scaler.inverse_transform(YS), scaler.inverse_transform(YS_pred) YS, YS_pred = YS.reshape(-1, opt.seq_len, YS.shape[-1]), YS_pred.reshape(-1, opt.seq_len, YS_pred.shape[-1]) YS, YS_pred = YS.transpose(0, 2, 1), YS_pred.transpose(0, 2, 1) # np.save(path + f'/{name}_prediction.npy', YS_pred) # np.save(path + f'/{name}_groundtruth.npy', YS) # np.save(path + f'/{name}_Mask_t1.npy', Mask) testScore(YS, YS_pred, '********* Evaluation on the whole testing dataset *********') testScore(YS[Mask], YS_pred[Mask], '********* Evaluation on the selected testing dataset when incident happen at t+1 *********') logger.info('Model Testing Ended ...', time.ctime()) ######################################################################################### parser = argparse.ArgumentParser() parser.add_argument("--loss", type=str, default='MAE', help="MAE, MSE, SELF") parser.add_argument("--epoch", type=int, default=200, help="number of epochs of training") parser.add_argument("--batch_size", type=int, default=64, help="size of the batches") parser.add_argument("--lr", type=float, default=0.001, help="adam: learning rate") parser.add_argument("--patience", type=float, default=10, help="patience used for early stop") parser.add_argument('--val_ratio', type=float, default=0.25, help='the ratio of validation data among the 
trainval ratio') parser.add_argument('--seed', type=int, default=1234, help='Random seed.') parser.add_argument('--seq_len', type=int, default=6, help='sequence length of values, which should be even nums (2,4,6,12)') parser.add_argument('--his_len', type=int, default=6, help='sequence length of observed historical values') parser.add_argument('--month', type=str, default='202112', help='which experiment setting (month) to run') parser.add_argument('--city', type=str, default='tokyo', help='which experiment setting (city) to run') parser.add_argument('--channelin', type=int, default=1, help='number of input channel') parser.add_argument('--channelout', type=int, default=1, help='number of output channel') parser.add_argument('--time', type=bool, default=False, help='whether to use float time embedding') parser.add_argument('--history', type=bool, default=False, help='whether to use historical data') parser.add_argument('--num_layers', type=int, default=1, help='number of layers') parser.add_argument('--hiddenunits', type=int, default=32, help='number of hidden units') parser.add_argument('--mem_num', type=int, default=10, help='number of memory') parser.add_argument('--mem_dim', type=int, default=32, help='dimension of memory') parser.add_argument("--decoder", type=str, default='stepwise', help="which type of decoder: stepwise or stepwise") parser.add_argument('--ycov', type=str, default='time', help='which ycov to use: time or history') parser.add_argument('--go', type=str, default='random', help='which type of decoder go: random or last') parser.add_argument('--model', type=str, default='MemGCRN', help='which model to use') parser.add_argument('--gpu', type=int, default=3, help='which gpu to use') parser.add_argument('--lamb', type=float, default=0.01, help='lamb value for separate loss') parser.add_argument('--lamb1', type=float, default=0.01, help='lamb1 value for compact loss') opt = parser.parse_args() # optimal1: --ycov=history --go=random --lamb=0.01 
--lamb1=0.01 # optimal2: --ycov=time --go=last --lamb=0.01 --lamb1=0.0 config = ConfigParser() config.read('params.txt', encoding='UTF-8') train_month = eval(config[opt.month]['train_month']) test_month = eval(config[opt.month]['test_month']) traffic_path = config[opt.month]['traffic_path'] subroad_path = config[opt.city]['subroad_path'] road_path = config['common']['road_path'] adj_path = config['common']['adjdis_path'] # adj_path = config['common']['adj01_path'] num_variable = len(np.loadtxt(subroad_path).astype(int)) N_link = config.getint('common', 'N_link') feature_list = ['speed_typea'] if opt.ycov=='time': opt.time = True elif opt.ycov=='history': opt.history = True else: assert False, 'ycov type must be float time or float history value' if opt.time: feature_list.append('weekdaytime') if opt.history: feature_list.append('speed_typea_y') # opt.channelin = len(feature_list) # Here, input for the encoder is just speed, w/o xcov is better. # feature_list = ['speed_typea', 'accident_flag', 'real_accident_flag', 'weekdaytime', 'speed_typea_y'] _, filename = os.path.split(os.path.abspath(sys.argv[0])) filename = os.path.splitext(filename)[0] model_name = opt.model timestring = time.strftime('%Y%m%d%H%M%S', time.localtime()) path = f'../save/{opt.city}{opt.month}_{model_name}_c{opt.channelin}to{opt.channelout}_{timestring}_{opt.ycov}' logging_path = f'{path}/{model_name}_{timestring}_logging.txt' score_path = f'{path}/{model_name}_{timestring}_scores.txt' epochlog_path = f'{path}/{model_name}_{timestring}_epochlog.txt' modelpt_path = f'{path}/{model_name}_{timestring}.pt' if not os.path.exists(path): os.makedirs(path) shutil.copy2(sys.argv[0], path) shutil.copy2(f'{model_name}.py', path) logger = logging.getLogger(__name__) logger.setLevel(level = logging.INFO) class MyFormatter(logging.Formatter): def format(self, record): spliter = ' ' record.msg = str(record.msg) + spliter + spliter.join(map(str, record.args)) record.args = tuple() # set empty to args return 
super().format(record) formatter = MyFormatter() handler = logging.FileHandler(logging_path, mode='a') handler.setLevel(logging.INFO) handler.setFormatter(formatter) console = logging.StreamHandler() console.setLevel(logging.INFO) console.setFormatter(formatter) logger.addHandler(handler) logger.addHandler(console) logger.info('lamb', opt.lamb) logger.info('lamb1', opt.lamb1) logger.info('experiment_city', opt.city) logger.info('experiment_month', opt.month) logger.info('model_name', opt.model) logger.info('mem_num', opt.mem_num) logger.info('mem_dim', opt.mem_dim) logger.info('decoder_type', opt.decoder) logger.info('go_type', opt.go) logger.info('ycov_type', opt.ycov) logger.info('batch_size', opt.batch_size) logger.info('rnn_units', opt.hiddenunits) logger.info('num_layers', opt.num_layers) logger.info('channnel_in', opt.channelin) logger.info('channnel_out', opt.channelout) logger.info('feature_time', opt.time) logger.info('feature_history', opt.history) ##################################################################################################### cpu_num = 1 os.environ ['OMP_NUM_THREADS'] = str(cpu_num) os.environ ['OPENBLAS_NUM_THREADS'] = str(cpu_num) os.environ ['MKL_NUM_THREADS'] = str(cpu_num) os.environ ['VECLIB_MAXIMUM_THREADS'] = str(cpu_num) os.environ ['NUMEXPR_NUM_THREADS'] = str(cpu_num) torch.set_num_threads(cpu_num) device = torch.device("cuda:{}".format(opt.gpu)) if torch.cuda.is_available() else torch.device("cpu") np.random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available(): torch.cuda.manual_seed(opt.seed) scaler = StandardScaler() def main(): train_data = [get_data(config[month]['traffic_path'], N_link, subroad_path, feature_list) for month in train_month] test_data = [get_data(config[month]['traffic_path'], N_link, subroad_path, feature_list) for month in test_month] test_flag = [get_data(config[month]['traffic_path'], N_link, subroad_path, ['accident_flag']) for month in test_month] speed_data = [] for data in 
train_data: speed_data.append(data[:,:,0]) for data in test_data: speed_data.append(data[:,:,0]) speed_data = np.vstack(speed_data) scaler.fit(speed_data) for data in train_data: logger.info('train_data', data.shape) data[:,:,0] = scaler.transform(data[:,:,0]) for data in test_data: logger.info('test_data', data.shape) data[:,:,0] = scaler.transform(data[:,:,0]) logger.info(opt.city, opt.month, 'training started', time.ctime()) trainXS, trainYS = getXSYS(train_data, opt.his_len, opt.seq_len) trainXS, trainYS, trainXCov, trainYCov = refineXSYS(trainXS, trainYS) logger.info('TRAIN XS.shape YS.shape, XCov.shape, YCov.shape', trainXS.shape, trainYS.shape, trainXCov.shape, trainYCov.shape) trainModel(model_name, 'train', trainXS, trainYS, trainYCov) logger.info(opt.city, opt.month, 'testing started', time.ctime()) testXS, testYS = getXSYS(test_data, opt.his_len, opt.seq_len) _, testYSFlag = getXSYS(test_flag, opt.his_len, opt.seq_len) testYMask = testYSFlag[:, 0, :, 0] > 0 # (B, N) incident happen at the first prediction timeslot, t+1. testXS, testYS, testXCov, testYCov = refineXSYS(testXS, testYS) logger.info('TEST XS.shape, YS.shape, XCov.shape, YCov.shape, YMask.shape', testXS.shape, testYS.shape, testXCov.shape, testYCov.shape, testYMask.shape) testModel(model_name, 'test', testXS, testYS, testYCov, testYMask) if __name__ == '__main__': main()
50.230769
202
0.644173
import sys import os import shutil import math import numpy as np import pandas as pd import scipy.sparse as ss from sklearn.preprocessing import StandardScaler, MinMaxScaler from datetime import datetime import time import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F from torchsummary import summary import argparse from configparser import ConfigParser import logging import Metrics from MemGCRN import MemGCRN from Utils import * def refineXSYS(XS, YS): assert opt.time or opt.history, 'it should have one covariate time or history' XCov, YCov = XS[..., -1:], YS[..., -1:] XS, YS = XS[:, :, :, :opt.channelin], YS[:, :, :, :opt.channelout] return XS, YS, XCov, YCov def print_params(model): param_count = 0 logger.info('Trainable parameter list:') for name, param in model.named_parameters(): if param.requires_grad: print(name, param.shape, param.numel()) param_count += param.numel() logger.info(f'\n In total: {param_count} trainable parameters. \n') return def getModel(mode): model = MemGCRN(num_nodes=num_variable, input_dim=opt.channelin, output_dim=opt.channelout, horizon=opt.seq_len, rnn_units=opt.hiddenunits, num_layers=opt.num_layers, mem_num=opt.mem_num, mem_dim=opt.mem_dim, decoder_type=opt.decoder, go_type=opt.go).to(device) if mode == 'train': summary(model, [(opt.his_len, num_variable, opt.channelin), (opt.seq_len, num_variable, opt.channelout)], device=device) print_params(model) for p in model.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) else: nn.init.uniform_(p) return model def evaluateModel(model, data_iter, ycov_flag): if opt.loss == 'MSE': criterion = nn.MSELoss() if opt.loss == 'MAE': criterion = nn.L1Loss() separate_loss = nn.TripletMarginLoss(margin=1.0) compact_loss = nn.MSELoss() model.eval() loss_sum, n, YS_pred = 0.0, 0, [] loss_sum1, loss_sum2, loss_sum3 = 0.0, 0.0, 0.0 with torch.no_grad(): if ycov_flag: for x, y, y_cov in data_iter: y_pred, h_att, query, pos, neg = model(x, y_cov) loss1 = 
criterion(y_pred, y) loss2 = separate_loss(query, pos.detach(), neg.detach()) loss3 = compact_loss(query, pos.detach()) loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3 loss_sum += loss.item() * y.shape[0] loss_sum1 += loss1.item() * y.shape[0] loss_sum2 += loss2.item() * y.shape[0] loss_sum3 += loss3.item() * y.shape[0] n += y.shape[0] YS_pred.append(y_pred.cpu().numpy()) else: for x, y in data_iter: y_pred, h_att, query, pos, neg = model(x) loss1 = criterion(y_pred, y) loss2 = separate_loss(query, pos.detach(), neg.detach()) loss3 = compact_loss(query, pos.detach()) loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3 loss_sum += loss.item() * y.shape[0] loss_sum1 += loss1.item() * y.shape[0] loss_sum2 += loss2.item() * y.shape[0] loss_sum3 += loss3.item() * y.shape[0] n += y.shape[0] YS_pred.append(y_pred.cpu().numpy()) loss = loss_sum / n loss1 = loss_sum1 / n loss2 = loss_sum2 / n loss3 = loss_sum3 / n YS_pred = np.vstack(YS_pred) return loss, loss1, loss2, loss3, YS_pred def trainModel(name, mode, XS, YS, YCov): logger.info('Model Training Started ...', time.ctime()) logger.info('TIMESTEP_IN, TIMESTEP_OUT', opt.his_len, opt.seq_len) model = getModel(mode) XS_torch, YS_torch = torch.Tensor(XS).to(device), torch.Tensor(YS).to(device) logger.info('XS_torch.shape: ', XS_torch.shape) logger.info('YS_torch.shape: ', YS_torch.shape) if YCov is not None: YCov_torch = torch.Tensor(YCov).to(device) logger.info('YCov_torch.shape: ', YCov_torch.shape) trainval_data = torch.utils.data.TensorDataset(XS_torch, YS_torch, YCov_torch) else: trainval_data = torch.utils.data.TensorDataset(XS_torch, YS_torch) trainval_size = len(trainval_data) train_size = int(trainval_size * (1 - opt.val_ratio)) train_data = torch.utils.data.Subset(trainval_data, list(range(0, train_size))) val_data = torch.utils.data.Subset(trainval_data, list(range(train_size, trainval_size))) train_iter = torch.utils.data.DataLoader(train_data, opt.batch_size, shuffle=False) val_iter = 
torch.utils.data.DataLoader(val_data, opt.batch_size, shuffle=False) trainval_iter = torch.utils.data.DataLoader(trainval_data, opt.batch_size, shuffle=False) optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr) if opt.loss == 'MSE': criterion = nn.MSELoss() if opt.loss == 'MAE': criterion = nn.L1Loss() separate_loss = nn.TripletMarginLoss(margin=1.0) compact_loss = nn.MSELoss() min_val_loss = np.inf wait = 0 for epoch in range(opt.epoch): starttime = datetime.now() loss_sum, n = 0.0, 0 loss_sum1, loss_sum2, loss_sum3 = 0.0, 0.0, 0.0 model.train() if YCov is not None: for x, y, ycov in train_iter: optimizer.zero_grad() y_pred, h_att, query, pos, neg = model(x, ycov) loss1 = criterion(y_pred, y) loss2 = separate_loss(query, pos.detach(), neg.detach()) loss3 = compact_loss(query, pos.detach()) loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3 loss.backward() optimizer.step() loss_sum += loss.item() * y.shape[0] loss_sum1 += loss1.item() * y.shape[0] loss_sum2 += loss2.item() * y.shape[0] loss_sum3 += loss3.item() * y.shape[0] n += y.shape[0] else: for x, y in train_iter: optimizer.zero_grad() y_pred, h_att, query, pos, neg = model(x) loss1 = criterion(y_pred, y) loss2 = separate_loss(query, pos.detach(), neg.detach()) loss3 = compact_loss(query, pos.detach()) loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3 loss.backward() optimizer.step() loss_sum += loss.item() * y.shape[0] loss_sum1 += loss1.item() * y.shape[0] loss_sum2 += loss2.item() * y.shape[0] loss_sum3 += loss3.item() * y.shape[0] n += y.shape[0] train_loss = loss_sum / n train_loss1 = loss_sum1 / n train_loss2 = loss_sum2 / n train_loss3 = loss_sum3 / n val_loss, val_loss1, val_loss2, val_loss3, _ = evaluateModel(model, val_iter, YCov is not None) if val_loss < min_val_loss: wait = 0 min_val_loss = val_loss torch.save(model.state_dict(), modelpt_path) else: wait += 1 if wait == opt.patience: logger.info('Early stopping at epoch: %d' % epoch) break endtime = datetime.now() epoch_time = (endtime 
- starttime).seconds logger.info("epoch", epoch, "time used:", epoch_time," seconds ", "train loss:", train_loss, train_loss1, train_loss2, train_loss3, "validation loss:", val_loss, val_loss1, val_loss2, val_loss3) with open(epochlog_path, 'a') as f: f.write("%s, %d, %s, %d, %s, %s, %.6f, %s, %.6f\n" % ("epoch", epoch, "time used", epoch_time, "seconds", "train loss", train_loss, "validation loss:", val_loss)) loss, loss1, loss2, loss3, YS_pred = evaluateModel(model, trainval_iter, YCov is not None) logger.info('trainval loss, loss1, loss2, loss3', loss, loss1, loss2, loss3) logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape) YS = YS[:YS_pred.shape[0], ...] YS, YS_pred = np.squeeze(YS), np.squeeze(YS_pred) YS, YS_pred = YS.reshape(-1, YS.shape[-1]), YS_pred.reshape(-1, YS_pred.shape[-1]) YS, YS_pred = scaler.inverse_transform(YS), scaler.inverse_transform(YS_pred) YS, YS_pred = YS.reshape(-1, opt.seq_len, YS.shape[-1]), YS_pred.reshape(-1, opt.seq_len, YS_pred.shape[-1]) logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape) MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS, YS_pred) logger.info('*' * 40) logger.info("%s, %s, Torch MSE, %.6f, %.6f, %.6f, %.6f" % (name, mode, train_loss, train_loss1, train_loss2, train_loss3)) logger.info("%s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f" % (name, mode, MSE, RMSE, MAE, MAPE)) logger.info('Model Training Ended ...', time.ctime()) def testModel(name, mode, XS, YS, YCov, Mask=None): def testScore(YS, YS_pred, message): MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS, YS_pred) logger.info(message) logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape) logger.info("%s, %s, Torch MSE, %.6f, %.6f, %.6f, %.6f" % (name, mode, loss, loss1, loss2, loss3)) logger.info("all pred steps, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f" % (name, mode, MSE, RMSE, MAE, MAPE)) with open(score_path, 'a') as f: f.write("all pred steps, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\n" % (name, mode, 
MSE, RMSE, MAE, MAPE)) for i in range(opt.seq_len): MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS[..., i], YS_pred[..., i]) logger.info("%d step, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f" % (i+1, name, mode, MSE, RMSE, MAE, MAPE)) f.write("%d step, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\n" % (i+1, name, mode, MSE, RMSE, MAE, MAPE)) return None logger.info('Model Testing Started ...', time.ctime()) logger.info('TIMESTEP_IN, TIMESTEP_OUT', opt.his_len, opt.seq_len) model = getModel(mode) model.load_state_dict(torch.load(modelpt_path)) XS_torch, YS_torch = torch.Tensor(XS).to(device), torch.Tensor(YS).to(device) if YCov is not None: YCov_torch = torch.Tensor(YCov).to(device) test_data = torch.utils.data.TensorDataset(XS_torch, YS_torch, YCov_torch) else: test_data = torch.utils.data.TensorDataset(XS_torch, YS_torch) test_iter = torch.utils.data.DataLoader(test_data, opt.batch_size, shuffle=False) loss, loss1, loss2, loss3, YS_pred = evaluateModel(model, test_iter, YCov is not None) logger.info('test loss, loss1, loss2, loss3', loss, loss1, loss2, loss3) logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape) YS = YS[:YS_pred.shape[0], ...] 
YS, YS_pred = np.squeeze(YS), np.squeeze(YS_pred) YS, YS_pred = YS.reshape(-1, YS.shape[-1]), YS_pred.reshape(-1, YS_pred.shape[-1]) YS, YS_pred = scaler.inverse_transform(YS), scaler.inverse_transform(YS_pred) YS, YS_pred = YS.reshape(-1, opt.seq_len, YS.shape[-1]), YS_pred.reshape(-1, opt.seq_len, YS_pred.shape[-1]) YS, YS_pred = YS.transpose(0, 2, 1), YS_pred.transpose(0, 2, 1) testScore(YS, YS_pred, '********* Evaluation on the whole testing dataset *********') testScore(YS[Mask], YS_pred[Mask], '********* Evaluation on the selected testing dataset when incident happen at t+1 *********') logger.info('Model Testing Ended ...', time.ctime()) ) handler.setLevel(logging.INFO) handler.setFormatter(formatter) console = logging.StreamHandler() console.setLevel(logging.INFO) console.setFormatter(formatter) logger.addHandler(handler) logger.addHandler(console) logger.info('lamb', opt.lamb) logger.info('lamb1', opt.lamb1) logger.info('experiment_city', opt.city) logger.info('experiment_month', opt.month) logger.info('model_name', opt.model) logger.info('mem_num', opt.mem_num) logger.info('mem_dim', opt.mem_dim) logger.info('decoder_type', opt.decoder) logger.info('go_type', opt.go) logger.info('ycov_type', opt.ycov) logger.info('batch_size', opt.batch_size) logger.info('rnn_units', opt.hiddenunits) logger.info('num_layers', opt.num_layers) logger.info('channnel_in', opt.channelin) logger.info('channnel_out', opt.channelout) logger.info('feature_time', opt.time) logger.info('feature_history', opt.history)
true
true
1c45c40bf2888f61ea031bfc6439f06b59f6dae5
664
py
Python
manage.py
njiiri12/neighbourhood
e36f04f450c352f3947ff991118e4c06cc5bcb87
[ "MIT" ]
null
null
null
manage.py
njiiri12/neighbourhood
e36f04f450c352f3947ff991118e4c06cc5bcb87
[ "MIT" ]
null
null
null
manage.py
njiiri12/neighbourhood
e36f04f450c352f3947ff991118e4c06cc5bcb87
[ "MIT" ]
null
null
null
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NH_watch.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
28.869565
73
0.679217
import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NH_watch.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
true
true
1c45c46dba085dae7fe79cedf838de675bd3c279
5,081
py
Python
opps/articles/views.py
jeanmask/opps
031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87
[ "MIT" ]
159
2015-01-03T16:36:35.000Z
2022-03-29T20:50:13.000Z
opps/articles/views.py
jeanmask/opps
031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87
[ "MIT" ]
81
2015-01-02T21:26:16.000Z
2021-05-29T12:24:52.000Z
opps/articles/views.py
jeanmask/opps
031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87
[ "MIT" ]
75
2015-01-23T13:41:03.000Z
2021-09-24T03:45:23.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- from django.contrib.sites.models import get_current_site from django.utils import timezone from django.conf import settings from opps.views.generic.list import ListView from opps.containers.views import ContainerList from opps.containers.models import Container, ContainerBox from opps.articles.models import Album class AlbumList(ContainerList): model = Album type = 'articles' def get_template_names(self): templates = [] domain_folder = self.get_template_folder() list_name = 'list' templates.append('{0}/{1}/{2}.html'.format( self.model._meta.app_label, self.model._meta.module_name, list_name)) if self.request.GET.get('page') and\ self.__class__.__name__ not in\ settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}/{2}/{3}_paginated.html'.format( domain_folder, self.model._meta.app_label, self.model._meta.module_name, list_name)) return templates def get_queryset(self): # TODO: refatoring, used super() self.site = get_current_site(self.request) self.long_slug = self.get_long_slug() if not self.long_slug: return None self.set_channel_rules() self.articleboxes = ContainerBox.objects.filter( channel__long_slug=self.long_slug) is_paginated = self.page_kwarg in self.request.GET if not is_paginated: for box in self.articleboxes: self.excluded_ids.update( [a.pk for a in box.ordered_containers()]) filters = {} filters['site_domain'] = self.site.domain filters['date_available__lte'] = timezone.now() filters['published'] = True filters['child_class'] = 'Album' if self.channel and self.channel.is_root_node() and not is_paginated: filters['show_on_root_channel'] = True queryset = Container.objects.filter( **filters).exclude(pk__in=self.excluded_ids) return queryset._clone() class AlbumChannelList(ListView): model = Album type = 'articles' template_name_suffix = 'album' def get_template_list(self, domain_folder="containers"): templates = [] list_name = 'list' if self.template_name_suffix: list_fullname = 
"{0}_{1}".format(self.template_name_suffix, list_name) if self.channel: if self.channel.group and self.channel.parent: templates.append('{0}/{1}/{2}.html'.format( domain_folder, self.channel.parent.long_slug, list_fullname)) if self.request.GET.get('page') and\ self.__class__.__name__ not in\ settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}/{2}_paginated.html'.format( domain_folder, self.channel.parent.long_slug, list_fullname)) if self.request.GET.get('page') and\ self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}/{2}_paginated.html'.format( domain_folder, self.channel.long_slug, list_fullname)) templates.append('{0}/{1}/{2}.html'.format( domain_folder, self.channel.long_slug, list_fullname)) for t in self.channel.get_ancestors()[::-1]: templates.append('{0}/{1}/{2}.html'.format( domain_folder, t.long_slug, list_fullname)) if self.request.GET.get('page') and\ self.__class__.__name__ not in\ settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}/{2}_paginated.html'.format( domain_folder, t.long_slug, list_fullname)) if self.request.GET.get('page') and\ self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}_paginated.html'.format(domain_folder, list_fullname)) templates.append('{0}/{1}/{2}.html'.format( self.model._meta.app_label, self.model._meta.module_name, list_name)) return templates def get_template_names(self): domain_folder = self.get_template_folder() template_list = self.get_template_list(domain_folder) return template_list def get_queryset(self): self.site = get_current_site(self.request) queryset = super(AlbumChannelList, self).get_queryset() filters = {} filters['site_domain'] = self.site.domain filters['date_available__lte'] = timezone.now() filters['published'] = True filters['show_on_root_channel'] = True queryset = queryset.filter(**filters) return queryset._clone()
35.531469
77
0.597717
from django.contrib.sites.models import get_current_site from django.utils import timezone from django.conf import settings from opps.views.generic.list import ListView from opps.containers.views import ContainerList from opps.containers.models import Container, ContainerBox from opps.articles.models import Album class AlbumList(ContainerList): model = Album type = 'articles' def get_template_names(self): templates = [] domain_folder = self.get_template_folder() list_name = 'list' templates.append('{0}/{1}/{2}.html'.format( self.model._meta.app_label, self.model._meta.module_name, list_name)) if self.request.GET.get('page') and\ self.__class__.__name__ not in\ settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}/{2}/{3}_paginated.html'.format( domain_folder, self.model._meta.app_label, self.model._meta.module_name, list_name)) return templates def get_queryset(self): self.site = get_current_site(self.request) self.long_slug = self.get_long_slug() if not self.long_slug: return None self.set_channel_rules() self.articleboxes = ContainerBox.objects.filter( channel__long_slug=self.long_slug) is_paginated = self.page_kwarg in self.request.GET if not is_paginated: for box in self.articleboxes: self.excluded_ids.update( [a.pk for a in box.ordered_containers()]) filters = {} filters['site_domain'] = self.site.domain filters['date_available__lte'] = timezone.now() filters['published'] = True filters['child_class'] = 'Album' if self.channel and self.channel.is_root_node() and not is_paginated: filters['show_on_root_channel'] = True queryset = Container.objects.filter( **filters).exclude(pk__in=self.excluded_ids) return queryset._clone() class AlbumChannelList(ListView): model = Album type = 'articles' template_name_suffix = 'album' def get_template_list(self, domain_folder="containers"): templates = [] list_name = 'list' if self.template_name_suffix: list_fullname = "{0}_{1}".format(self.template_name_suffix, list_name) if self.channel: if self.channel.group and 
self.channel.parent: templates.append('{0}/{1}/{2}.html'.format( domain_folder, self.channel.parent.long_slug, list_fullname)) if self.request.GET.get('page') and\ self.__class__.__name__ not in\ settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}/{2}_paginated.html'.format( domain_folder, self.channel.parent.long_slug, list_fullname)) if self.request.GET.get('page') and\ self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}/{2}_paginated.html'.format( domain_folder, self.channel.long_slug, list_fullname)) templates.append('{0}/{1}/{2}.html'.format( domain_folder, self.channel.long_slug, list_fullname)) for t in self.channel.get_ancestors()[::-1]: templates.append('{0}/{1}/{2}.html'.format( domain_folder, t.long_slug, list_fullname)) if self.request.GET.get('page') and\ self.__class__.__name__ not in\ settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}/{2}_paginated.html'.format( domain_folder, t.long_slug, list_fullname)) if self.request.GET.get('page') and\ self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP: templates.append('{0}/{1}_paginated.html'.format(domain_folder, list_fullname)) templates.append('{0}/{1}/{2}.html'.format( self.model._meta.app_label, self.model._meta.module_name, list_name)) return templates def get_template_names(self): domain_folder = self.get_template_folder() template_list = self.get_template_list(domain_folder) return template_list def get_queryset(self): self.site = get_current_site(self.request) queryset = super(AlbumChannelList, self).get_queryset() filters = {} filters['site_domain'] = self.site.domain filters['date_available__lte'] = timezone.now() filters['published'] = True filters['show_on_root_channel'] = True queryset = queryset.filter(**filters) return queryset._clone()
true
true
1c45c4b27799ee041b7c97394535e06eaba9dfb4
51,275
py
Python
cogs/game/minigames/game_of_life/player.py
FellowHashbrown/omega-psi-py
4ea33cdbef15ffaa537f2c9e382de508c58093fc
[ "MIT" ]
4
2018-12-23T08:49:40.000Z
2021-03-25T16:51:43.000Z
cogs/game/minigames/game_of_life/player.py
FellowHashbrown/omega-psi-py
4ea33cdbef15ffaa537f2c9e382de508c58093fc
[ "MIT" ]
23
2020-11-03T17:40:40.000Z
2022-02-01T17:12:59.000Z
cogs/game/minigames/game_of_life/player.py
FellowHashbrown/omega-psi-py
4ea33cdbef15ffaa537f2c9e382de508c58093fc
[ "MIT" ]
1
2019-07-11T23:40:13.000Z
2019-07-11T23:40:13.000Z
from asyncio import sleep from discord import Embed from math import ceil from random import randint, choice from cogs.globals import PRIMARY_EMBED_COLOR, NUMBER_EMOJIS, LEAVE from cogs.game.minigames.base_game.player import Player from cogs.game.minigames.game_of_life.functions import choose_house from cogs.game.minigames.game_of_life.variables import ( MARRIED, GRADUATION, BRIEFCASE, SPIN, BABY, FAMILY, RISKY_ROAD, RETIRED, GIFTS, BUY_HOUSE, SELL_HOUSE, DO_NOTHING, HOUSE, LOANS, PAYDAY, GET_MONEY, PAY_MONEY, ACTION, PAYDAY_BONUS ) from util.functions import get_embed_color # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # class GameOfLifePlayer(Player): """A GameOfLifePlayer object holds information regarding a player in the Game of Life minigame. :param member: The discord.Member defining this GameOfLifePlayer object or a str clarifying this GameOfLifePlayer object as an AI player """ def __init__(self, member): super().__init__(member = member) # Game player data self.is_married = False self.is_retired = False self.is_college = False self.move_modify = False self.extra_turn = False # Other player data self.space = "c0" self.babies = 0 self.pets = 1 self.cash = 200000 self.career = None self.action_cards = 0 self.house_cards = [] self.pet_cards = 0 self.loans = 0 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Getter # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # @property def member(self): return self.__member @property def is_ai(self): return self.__is_ai # # # # # # # # # # # # # # # # # # # # @property def is_married(self): return self.__is_married @property def is_retired(self): return self.__is_retired @property def is_college(self): return self.__is_college @property def move_modify(self): return self.__move_modify @property def extra_turn(self): return self.__extra_turn # # # # # # # # # # # # # # # # # # # # @property def space(self): return self.__space 
@property def babies(self): return self.__babies @property def pets(self): return self.__pets @property def cash(self): return self.__cash @property def career(self): return self.__career @property def action_cards(self): return self.__action_cards @property def house_cards(self): return self.__house_cards @property def pet_cards(self): return self.__pet_cards @property def loans(self): return self.__loans # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Setter # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # @member.setter def member(self, member): self.__member = member @is_ai.setter def is_ai(self, is_ai): self.__is_ai = is_ai # # # # # # # # # # # # # # # # # # # # @is_married.setter def is_married(self, is_married): self.__is_married = is_married @is_retired.setter def is_retired(self, is_retired): self.__is_retired = is_retired @is_college.setter def is_college(self, is_college): self.__is_college = is_college @move_modify.setter def move_modify(self, move_modify): self.__move_modify = move_modify @extra_turn.setter def extra_turn(self, extra_turn): self.__extra_turn = extra_turn # # # # # # # # # # # # # # # # # # # # @space.setter def space(self, space): self.__space = space @babies.setter def babies(self, babies): self.__babies = babies @pets.setter def pets(self, pets): self.__pets = pets @cash.setter def cash(self, cash): self.__cash = cash @career.setter def career(self, career): self.__career = career @action_cards.setter def action_cards(self, action_cards): self.__action_cards = action_cards @house_cards.setter def house_cards(self, house_cards): self.__house_cards = house_cards @pet_cards.setter def pet_cards(self, pet_cards): self.__pet_cards = pet_cards @loans.setter def loans(self, loans): self.__loans = loans # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Play Methods # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # async def setup(self, game): """Let's the player decide if they are going to 
college or choosing a career. :param game: The game object that this player is connected to """ # Check if the player is an AI and simulate a decision if self.is_ai: await sleep(2) college = randint(1, 10) % 2 == 0 # The player is a real person else: # Ask the player if they want to go to college or into a career college = False message = await game.ctx.send( self.member.mention, embed = Embed( title = "College or Career?", description = "If you want to go to college, react with {}\nIf you want to go straight into a career, react with {}".format( GRADUATION, BRIEFCASE ), colour = await get_embed_color(self.member) ) ) await message.add_reaction(GRADUATION) await message.add_reaction(BRIEFCASE) # Check for the user's reaction def check_reaction(reaction, user): return ( reaction.message.id == message.id and str(reaction) in [GRADUATION, BRIEFCASE] and user.id == self.member.id ) reaction, user = await game.bot.wait_for("reaction_add", check = check_reaction) college = str(reaction) == GRADUATION # Check if the user is going to college if college: # Take 100k from the user, set their college attribute to True # and send a message saying they are going to college self.cash -= 100000 self.is_college = True self.space = "c0" await game.ctx.send( embed = Embed( title = "{} is going to college!".format( self.get_name() ), description = "{} has to pay $100,000 for tuition fees".format( self.get_name() ), colour = PRIMARY_EMBED_COLOR if self.is_ai else await get_embed_color(self.member) ) ) # Check if the user is going into a career # have them decide and then display their career else: self.space = "j0" self.career = await self.ask_for_career(game) await game.ctx.send( embed = Embed( title = "{} chose a career!".format(self.get_name()), description = str(self.career), colour = PRIMARY_EMBED_COLOR if self.is_ai else await get_embed_color(self.member) ) ) async def process_turn(self, game): """Processes the current turn for this player by waiting until they react to make 
their move or, if this player is an AI, choosing the best place to go :param game: The game object that this player is connected to """ # Check if the player has an extra turn, remove it self.extra_turn = False # Ask the player to spin number = await self.ask_for_spin(game, allow_leave = True) # Check if the player is leaving if number == LEAVE: await game.add_action("{} left the game!".format(self.get_name())) return LEAVE # Add the number they spun to the current turn await game.add_action( "{} {} spun a {}!".format( SPIN, self.get_name(), number ) ) # The player is not leaving, check if there is a bonus paid to any player await game.pay_bonus(number) # Check what spot the player has landed on board_space = self.next_space(game, number) self.space = board_space.current # Check if the player got any paydays if board_space.paydays_passed > 0: await game.add_action( "{} {} got {} payday{}!".format( PAYDAY, self.get_name(), board_space.paydays_passed, "s" if board_space.paydays_passed > 1 else "" ) ) # Check if the space is a pet space if board_space.type == "pet": await self.process_pet_space(game) # Check if the space is an action space elif board_space.type == "action": await game.process_action_card(board_space) # Check if the space is a house space elif board_space.type == "house": await self.ask_for_house(game) # Check if the space is a spin-to-win space elif board_space.type == "spin_to_win": await game.spin_to_win() # Check if the space is a stop space elif board_space.stop: await self.process_stop_space(game, board_space) # Check if the space is a baby space elif board_space.type in ["baby", "twins", "triplets"]: await self.process_baby_space(game, board_space) # Check if the user has to pay money or receive money elif board_space.type == "pay_money": self.cash -= board_space.amount await game.add_action( "{} {} has to pay ${:0,}!".format( PAY_MONEY, self.get_name(), board_space.amount ) ) elif board_space.type == "get_money": self.cash += board_space.amount 
await game.add_action( "{} {} gets paid ${:0,}!".format( GET_MONEY, self.get_name(), board_space.amount ) ) # Check if the player landed on a payday space elif board_space.type == "payday": self.cash += 100000 await game.add_action( "{} {} landed on a payday and got a $100,000 bonus!".format( PAYDAY_BONUS, self.get_name() ) ) # Sleep for 3 seconds so everyone can read what happened await sleep(3) return False async def ask_for_spin(self, game, *, is_color = False, allow_leave = False): """Let's the player spin for a number or a color If getting color, when this: returns True, the color is black returns False, the color is red :param game: The game object this player is connected to :param is_color: Whether or not to get the color of the result or just a number. (Defaults to False) :param allow_leave: Whether or not to allow the player to leave during this spin. (Defaults to False) :returns: The resulting number or whether the color is black or red """ # Check if the player is an AI, simulate waiting to spin if self.is_ai: await sleep(2) # The player is a real person, wait for their reaction to spin else: # Send the message and add the valid reactions message = await game.ctx.send( embed = Embed( title = "Spin!", description = "{}, react with {} when you're ready to spin.{}".format( self.get_name(), SPIN, "\nIf you'd like to leave, react with {}".format( LEAVE ) if allow_leave else "" ), colour = await get_embed_color(self.member) ) ) await message.add_reaction(SPIN) if allow_leave: await message.add_reaction(LEAVE) # Wait for the user's reaction def check_reaction(reaction, user): return ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in ( [SPIN, LEAVE] if allow_leave else [SPIN] ) ) reaction, user = await game.bot.wait_for("reaction_add", check = check_reaction) await message.delete() # Check if the player is leaving if str(reaction) == LEAVE: return LEAVE # Choose a random number number = None for value in range(randint(1, 
10)): number = randint(1, 10) is_black = number % 2 == 0 # Check if returning color or number if is_color: return is_black return number async def ask_for_career(self, game, *, new_career = False): """Let's the player choose their career given two cards :param game: The game object this player is connected to :param new_career: Whether the player is choosing between their current career and a new one or choosing between two new careers :returns: The career the player chose :rtype: CareerCard """ # Set the target deck of career cards depending on whether or not # this player is a college player career_cards = game.career_cards if not self.is_college else game.college_career_cards # If choosing between two new careers, choose two random careers from the deck # and have them decide if not new_career: career_one = career_cards.pop(randint(0, len(career_cards) - 1)) career_two = career_cards.pop(randint(0, len(career_cards) - 1)) # The player is choosing between their current career and a new one else: career_one = self.career career_two = career_cards.pop(randint(0, len(career_cards) - 1)) # Check if the player is an AI, simulate a decision if self.is_ai: await sleep(2) return career_one if randint(1, 10) % 2 == 0 else career_two # The player is a real person, have them decide else: await game.ctx.send( embed = Embed( title = "Choose a Career!", description = "Check your DMs for your career choices!", colour = await get_embed_color(self.member) ), delete_after = 5 ) message = await self.member.send( embed = Embed( title = "Choose a Career!", description = "_ _", colour = await get_embed_color(self.member) ).add_field( name = NUMBER_EMOJIS[0], value = str(career_one), inline = False ).add_field( name = NUMBER_EMOJIS[1], value = str(career_two), inline = False ) ) await message.add_reaction(NUMBER_EMOJIS[0]) await message.add_reaction(NUMBER_EMOJIS[1]) # Wait for the user to decide which card they want def check_reaction(reaction, user): return ( reaction.message.id == 
message.id and user.id == self.member.id and str(reaction) in NUMBER_EMOJIS[ : 2] ) reaction, user = await game.bot.wait_for("reaction_add", check = check_reaction) if str(reaction) == NUMBER_EMOJIS[0]: return career_one return career_two async def ask_for_house(self, game, *, sell_house = False, house = None): """Let's the player decide if they want to buy a house, sell a house, or do nothing :param game: The game that this player is connected to :param sell_house: Whether or not to directly sell a player's house Note that this is primarily used when finalizing the end of a game :param house: The specific house to sell Note that this is primarily used when finalizing the end of a game :rtype: int """ # Only ask the player what they want to do if not directly selling a house action = None if not sell_house else "sell" if not sell_house: # Check if the player is an AI, simulate deciding on an action if self.is_ai: actions = ["buy", "sell", "nothing"] action = choice(actions) while action == "sell" and len(self.house_cards) == 0: action = choice(actions) await sleep(2) # The player is a real person, ask them what they want to do else: # Send a message asking them what they want to do and add # the necessary reactions message = await game.ctx.send( embed = Embed( title = "Buy, Sell, or do nothing?", description = "{}{}{}".format( "If you want to buy a house, react with {}\n".format(BUY_HOUSE), "If you want to sell a house, react with {}\n".format(SELL_HOUSE) if len(self.house_cards) > 0 else "", "If you want to do nothing, react with {}".format(DO_NOTHING) ), colour = await get_embed_color(self.member) ) ) await message.add_reaction(BUY_HOUSE) if len(self.house_cards) > 0: await message.add_reaction(SELL_HOUSE) await message.add_reaction(DO_NOTHING) # Wait for the player to react def check_reaction(reaction, user): return ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in ( [BUY_HOUSE, SELL_HOUSE, DO_NOTHING] if 
len(self.house_cards) > 0 else [BUY_HOUSE, DO_NOTHING] ) ) reaction, user = await game.bot.wait_for("reaction_add", check = check_reaction) if str(reaction) == BUY_HOUSE: action = "buy" elif str(reaction) == SELL_HOUSE: action = "sell" else: action = "nothing" await message.delete() # The player will buy a house if action == "buy": # Keep track of how many loans need to be taken # the chosen house # and the houses to choose from take_loans = False chosen_house = None house_cards = game.house_cards house_one = house_cards.pop(0) house_two = house_cards.pop(0) # Check if the player is an AI, choose a house intelligently and simulate a decision if self.is_ai: chosen_house = choose_house(self, house_one = house_one, house_two = house_two) take_loans = chosen_house != None # The player is a real person, have them choose a house if they want to else: message = await self.member.send( embed = Embed( title = "Choose a house!", description = "_ _", colour = await get_embed_color(self.member) ).add_field( name = NUMBER_EMOJIS[0], value = str(house_one), inline = False ).add_field( name = NUMBER_EMOJIS[1], value = str(house_two), inline = False ) ) await message.add_reaction(NUMBER_EMOJIS[0]) await message.add_reaction(NUMBER_EMOJIS[1]) # Wait for the player to decide which house they want reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in NUMBER_EMOJIS[ : 2] )) # The player chose the first house if str(reaction) == NUMBER_EMOJIS[0]: chosen_house = house_one # The player chose the second house elif str(reaction) == NUMBER_EMOJIS[1]: chosen_house = house_two await message.delete() # Check if the player can buy the house without loans # add the other house back to the deck if chosen_house.purchase <= self.cash: if str(reaction) == NUMBER_EMOJIS[0]: house_cards.append(house_two) else: house_cards.append(house_one) take_loans = True # The player has to take 
out loans for their chosen house # ask them if they still want to buy the house else: # Send a message asking the player what they want to do # and add the reactions take_loans = await self.ask_for_split_path( game, title = "Loans Needed", description = ( """ In order to buy that house, you need to take out some loans. If you want to take loans out, react with {}. If you want to cancel buying the house, react with {}. """ ), true_path = LOANS, false_path = LEAVE ) # The player wants to buy the house, check if they need to take out loans if take_loans: loans_needed = ceil((chosen_house.purchase - self.cash) / 50000) if loans_needed > 0: self.loans += loans_needed self.cash += 50000 * loans_needed # Take cash from the player to purchase the house self.cash -= chosen_house.purchase self.house_cards.append(chosen_house) await game.add_action( "{} {} bought a new house{}!\n{}".format( HOUSE, self.get_name(), " after taking out some loans" if loans_needed > 0 else "", str(chosen_house) ) ) # The player does not want to take out loans else: house_cards.append(house_one) house_cards.append(house_two) await game.add_action( "{} {} did not want to take any loans! 
They will not buy the house.".format( ACTION, self.get_name() ) ) # The player will sell a house elif action == "sell": # Update the turn message await game.add_action( "{} {} is selling a house!".format( HOUSE, self.get_name() ) ) # Only ask the player to choose a house if a house has not been specified # to sell directly chosen_house = house if not sell_house: # Check if the player is an AI # decide on what house to sell and sleep for 2 seconds to simulate the decision if self.is_ai: chosen_house = choose_house(self, buy = False) self.house_cards.remove(chosen_house) # The player is a real person, have them decide on the house else: # Create the embed to ask for which house to sell # and add number fields for each house the player has embed = Embed( title = "Choose a house to sell!", description = "_ _", colour = await get_embed_color(self.member) ) for index in range(len(self.house_cards)): house = self.house_cards[index] embed.add_field( name = NUMBER_EMOJIS[index], value = str(house), inline = False ) # Send the message to ask the player which house to sell # and add the valid reactions message = await self.member.send(embed = embed) for emoji in NUMBER_EMOJIS[ : len(self.house_cards)]: await message.add_reaction(emoji) # Wait for the user to choose which house to sell and delete the message reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in NUMBER_EMOJIS[ : len(self.house_cards)] )) await message.delete() chosen_house = self.house_cards.pop(NUMBER_EMOJIS.index(str(reaction))) # Update the turn message with which house the player is selling await game.add_action( "{} {} is selling the following house:\n{}".format( HOUSE, self.get_name(), str(chosen_house) ) ) # Have the player spin to see how much they sell their house for is_black = await self.ask_for_spin(game, is_color = True) amount = chosen_house.spin_black if is_black else 
chosen_house.spin_red # Update the turn message with how much the house was sold for self.cash += amount await game.add_action( "{} {} sold their house for ${:0,}".format( GET_MONEY, self.get_name(), amount ) ) # Return the amount that the player sold the house for return amount # The player will do nothing else: await game.add_action( "{} {} chose not to buy nor sell a house.".format( ACTION, self.get_name() ) ) async def ask_for_opponent(self, game, *, is_lawsuit = False): """Asks this player to choose an opponent for a competition or a lawsuit card :param game: The game that this player is connected to :param is_lawsuit: Whether or not this player is choosing an opponent for a lawsuit. (Defaults to False) :rtype: GameOfLifePlayer """ # Check if the player is an AI # the AI will choose someone with the highest salary # Also sleep for 2 seconds to simulate a decision if self.is_ai: opponent = max(game.players, key = lambda player: ( player.career.salary if (player.career and player.id != self.id) else 0 )) while opponent.id == self.id: opponent = choice(game.players) await sleep(2) # The player is a real person, ask them who they want to choose # as their opponent else: # Create the embed to ask for the opponent embed = Embed( title = "Choose an opponent!", description = "_ _", colour = await get_embed_color(self.member) ) # Add the opponents as fields to choose from # linked to number emojis opponents = list(filter(lambda player: self.id != player.id, game.players)) for index in range(len(opponents)): embed.add_field( name = NUMBER_EMOJIS[index], value = opponents[index].get_name(), inline = False ) # Send the message to the game's ctx and add the # number emoji reactions for this player to react to # in order to choose an opponent message = await game.ctx.send(embed = embed) for emoji in NUMBER_EMOJIS[ : len(opponents)]: await message.add_reaction(emoji) # Wait for the player to react with which opponent they want to choose # and then delete the message 
reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.id and str(reaction) in NUMBER_EMOJIS[ : len(opponents)] )) await message.delete() # Get the opponent that they chose opponent = opponents[NUMBER_EMOJIS.index(str(reaction))] # If this is not for a lawsuit, update the turn message in the game if not is_lawsuit: await game.add_action( "{} {} chose {}!".format( ACTION, self.get_name(), opponent.get_name() ) ) return opponent async def ask_for_spot(self, game, message, spots, *, choose_from = 1): """Asks the player to choose a spot for their Spin to Win token. :param game: The game that this player is connected to :param message: The message to use to keep track of people's chosen spots :param spots: A dict object that holds data about spots already chosen and who chose the spot :param choose_from: The amount of spots the player can choose from. (Default is 1) """ # Let the player decide on how many spots to take for spot_choice in range(choose_from): # Check if this player is an AI, have them choose a random spot after # sleeping for 2 seconds to simulate a decision if self.is_ai: await sleep(2) spot = choice(NUMBER_EMOJIS) while str(spot) in spots: spot = choice(NUMBER_EMOJIS) # The player is a real person, have them decide on a spot as long as it's not taken yet else: # Create an embed showing the spots already taken embed = Embed( title = "Spin to Win!", description = "{}, choose your spot{}!".format( self.get_name(), "s" if choose_from - spot_choice > 1 else "" ), colour = PRIMARY_EMBED_COLOR if game.get_current_player().is_ai else await get_embed_color(game.get_current_player().member) ).add_field( name = "Spots Taken", value = "\n".join([ "{} - {}".format( str(spot), spots[spot].get_name() ) for spot in spots ]) if len(spots) > 0 else "None Taken Yet!", inline = False ) # Edit the message to update the embed # add the reactions, and wait for the player to react await 
message.edit(embed = embed) for emoji in NUMBER_EMOJIS: if str(emoji) not in spots: await message.add_reaction(emoji) reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.id and str(reaction) in NUMBER_EMOJIS and str(reaction) not in spots )) await message.clear_reactions() # Get the player's spot spot = str(reaction) # Add the player's spot to the spots dictionary spots[spot] = self return spots async def process_pet_space(self, game): """Processes the pet space when a player lands on it :param game: The game object that this player is connected to """ # Pull a card from the game's pet card deck card = game.pet_cards.pop(0) await game.add_action(str(card)) self.pet_cards += 1 # Check if the player is collecting money if card.action == "collect": # Give the player money and update the turn message self.cash += card.amount await game.add_action( "{} {} collected ${:0,}".format( GET_MONEY, self.get_name(), card.amount ) ) # Check if the player is collecting money for each pet elif card.action == "collect_for_each": # Give the player money for as many pets as they have # and update the turn message total = self.pets * card.amount self.cash += total await game.add_action( "{} {} collected ${:0,}{}!".format( GET_MONEY, self.get_name(), card.amount, " for each pet for a total of ${:0,}".format(total) if total != card.amount else "" ) ) # Check if the player is collecting money from each player elif card.action == "collect_from_each": # Take money from each player that is not this player # and give it to this player total = (len(game.players) - 1) * card.amount for player in game.players: if player.id != self.id: player.cash -= card.amount # Update the turn message await game.add_action( "{} {} collected ${:0,}{}!".format( GET_MONEY, self.get_name(), card.amount, " from everyone for a total of ${:0,}".format(total) if total != card.amount else "" ) ) # Check if the player is paying 
money elif card.action == "pay": # Take money from the player and update the turn message self.cash -= card.amount await game.add_action( "{} {} had to pay the bank ${:0,}!".format( PAY_MONEY, self.get_name(), card.amount ) ) # Check if the player is paying money for each pet elif card.action == "pay_for_each": # Take money from the player for each pet they have # and update the turn message total = self.pets * card.amount self.cash -= total await game.add_action( "{} {} had to pay the bank ${:0,}{}!".format( PAY_MONEY, self.get_name(), card.amount, " for each pet for a total of ${:0,}".format(total) if total != card.amount else "" ) ) # Check if the player is competing against another player elif card.action == "compete": # Have two players compete against each other and give the # winning player the amount on this card # then update the turn message winner, _ = await game.compete(self) winner.cash += card.amount await game.add_action( "{} {} collected ${:0,} for spinning higher!".format( GET_MONEY, self.get_name(), card.amount ) ) async def process_stop_space(self, game, board_space): """Processes the stop space when this player lands on it :param game: The game object that this player is connected to :param board_space: The board space object where this player is current occupying """ # Check if the player is graduating, ask them to choose a career if board_space.type == "graduation": self.is_college = True self.career = await self.ask_for_career(game) await game.add_action( "{} {} graduated and chose a career!\n{}".format( GRADUATION, self.get_name(), str(self.career) ) ) # Check if the player is getting married elif board_space.type == "married": # Send a message saying the player got married await game.add_action( "{} {} got married!\n{}, spin for gifts from everyone!".format( MARRIED, self.get_name(), GIFTS, self.get_name() ) ) # Ask the player to spin for gifs from everyone (apart from this player) # and have each player give the gift amount # depending on 
the color is_black = await self.ask_for_spin(game, is_color = True) amount = 100000 if is_black else 50000 total = amount * (len(game.players) - 1) for player in filter(lambda player: player.id != self.id, game.players): player.cash -= amount self.cash += total # Update the turn message saying how much money the player got in total await game.add_action( "{} {} collected ${:0,}{}!".format( GET_MONEY, self.get_name(), amount, " from everyone for a total of ${:0,}".format( total ) if total != amount else "" ) ) # Check if the player is spinning for babies elif board_space.type == "spin_for_babies": # Update the turn message saying the player is spinning for babies await game.add_action( "{} {} is spinning to see if they have any more babies!".format( BABY, self.get_name() ) ) # Ask the player to spin to see how many babies they get # and update the turn message value = await self.ask_for_spin(game) self.babies += board_space.spin[str(value)] await game.add_action( "{} {} had {} bab{}!".format( BABY, self.get_name(), board_space.spin[str(value)], "ies" if board_space.spin[str(value)] != 1 else "y" ) ) # Check if the player is deciding on night school elif board_space.type == "night_school": # Check if the player is an AI # sleep for 2 seconds to simulate a decision night_school = False if self.is_ai: night_school = randint(1, 10) % 2 == 0 await sleep(2) # The player is a real person, let them decide else: # Send the message asking the player to decide night_school = await self.ask_for_split_path( game, title = "Night School?", description = ( """ If you want to go to Night School, react with {}. If you don't want to go, react with {}. 
""" ), true_path = GRADUATION, false_path = SPIN ) # Make sure the player moves in the correct direction self.move_modify = night_school # Check if the player wants to go to night school if night_school: self.is_college = True self.cash -= 100000 await game.add_action( "{} {} had to pay ${:0,} to go to Night School!".format( PAY_MONEY, self.get_name(), 100000 ) ) # Ask for a new career self.career = await self.ask_for_career(game, new_career = True) # The player does not want to go to night school else: await game.add_action( "{} {} chose not to go to Night School!".format( ACTION, self.get_name() ) ) # Check if the player is deciding on the family path elif board_space.type == "family_path": # Check if this player is an AI, sleep for 2 seconds to simulate a decision family_path = False if self.is_ai: family_path = randint(1, 10) % 2 == 0 await sleep(2) # This player is a real player, let them decide else: # Send the message asking the player to decide family_path = await self.ask_for_split_path( game, title = "Family Path?", description = ( """ If you want to go down the Family Path, react with {}. If you don't want to, react with {}. """ ), true_path = FAMILY, false_path = SPIN ) # Update the turn message about the player's decision self.move_modify = family_path await game.add_action( "{} {} is{} going down the Family Path!".format( FAMILY if family_path else ACTION, self.get_name(), "" if family_path else " not" ) ) # Check if the player is deciding on risky road elif board_space.type == "risky_road": # Check if this player is an AI, sleep for 2 seconds to simulate a decision risky_road = False if self.is_ai: risky_road = randint(1, 10) % 2 == 0 await sleep(2) # This player is a real player, let them decide else: # Send the message asking the player to decide risky_road = await self.ask_for_split_path( game, title = "Risky Road?", description = ( """ If you want to go down the Risky Road, react with {}. If you don't want to, react with {}. 
""" ), true_path = RISKY_ROAD, false_path = SPIN ) # Update the turn message about the player's decision self.move_modify = risky_road await game.add_action( "{} {} is{} going down the Risky Road!".format( RISKY_ROAD if risky_road else ACTION, self.get_name(), "" if risky_road else " not" ) ) # Check if the player is retiring elif board_space.type == "retirement": # Give the player their retirement money # have the player retire # and update the turn message amount = 100000 * (5 - len(game.get_retired())) self.cash += amount self.is_retired = True await game.add_action( "{} {} has retired and collected ${:0,}".format( RETIRED, self.get_name(), amount ) ) # The player takes another turn if the space is not the # retirement space if board_space.type != "retirement": self.extra_turn = True async def process_baby_space(self, game, board_space): """Processes the baby space when this player lands on it :param game: The game object that this player is connected to :param board_space: The board space object where this player is current occupying """ # Determine what action the baby space is if board_space.type == "baby": description = "{} {} had a baby!" self.babies += 1 elif board_space.type == "twins": description = "{} {} had twins!" self.babies += 2 elif board_space.type == "triplets": description = "{} {} had triplets!" 
self.babies += 3 # Update the turn message await game.add_action(description.format(BABY, self.get_name())) async def ask_for_split_path(self, game, *, title = None, description = None, true_path = None, false_path = None): """Asks the player to decide on a split path :param game: The game object that this player is connected to :param title: The title of the embed to send :param description: A formatted description of the embed to send Note that this description must include format braces to take into account the true_path and false_path emojis :param true_path: An emoji for the player to go towards the new split path :param false_path: An emoji for the player to stay on the same path :rtype: bool """ # Send a message asking the player if they want to go down the new path or stay on # the current path message = await game.ctx.send( embed = Embed( title = title, description = description.format(true_path, false_path), colour = await get_embed_color(self.member) ) ) await message.add_reaction(true_path) await message.add_reaction(false_path) # Wait for the user to react with their choice and delete the message reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.id and str(reaction) in [true_path, false_path] )) await message.delete() return str(reaction) == true_path # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Other Methods # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def next_space(self, game, number): """Moves the player as many moves as specfied by number :param game: The game object that this player is connected to :param number: The amount of spaces to move the player """ # Keep track of how many moves have been made, # how many paydays were passed # and the board of the game moves = 0 paydays = 0 board = game.board current = self.space while True: # Check if the player moves in a specific way # For example, if the player comes to a stop 
sign # and there are two different paths that can be taken, # the player will have decided on which path to take if board[current].next_true != None: if self.move_modify: current = board[current].next_true else: current = board[current].next_false # The player moves normally else: current = board[current].next # Check if the space reached is a stop sign or all the moves have been made moves += 1 if board[current].stop or moves == number: # Check if the current space is a payday # give the player a bonus payday if board[current].type == "payday": paydays += 1 break # The player passed a payday if board[current].type == "payday": paydays += 1 # Add the player's payday to their cash for payday in range(paydays): self.cash += self.career.salary # Return a JSON object describing the player's current board state board[current].paydays_passed = paydays board[current].current = current return board[current] def get_name(self): """Returns the name of this Player. If the player is a discord.Member object, the name will be their username + discriminator :rtype: str """ if self.is_ai: return self.member return str(self.member) def give_payday(self, *, paydays_passed = 1): """Gives the player a payday from their career :param paydays_passed: The amount of paydays to give to the player """ self.cash += self.career.salary * paydays_passed
38.122677
144
0.505958
from asyncio import sleep from discord import Embed from math import ceil from random import randint, choice from cogs.globals import PRIMARY_EMBED_COLOR, NUMBER_EMOJIS, LEAVE from cogs.game.minigames.base_game.player import Player from cogs.game.minigames.game_of_life.functions import choose_house from cogs.game.minigames.game_of_life.variables import ( MARRIED, GRADUATION, BRIEFCASE, SPIN, BABY, FAMILY, RISKY_ROAD, RETIRED, GIFTS, BUY_HOUSE, SELL_HOUSE, DO_NOTHING, HOUSE, LOANS, PAYDAY, GET_MONEY, PAY_MONEY, ACTION, PAYDAY_BONUS ) from util.functions import get_embed_color PAY_MONEY, self.get_name(), board_space.amount ) ) elif board_space.type == "get_money": self.cash += board_space.amount await game.add_action( "{} {} gets paid ${:0,}!".format( GET_MONEY, self.get_name(), board_space.amount ) ) # Check if the player landed on a payday space elif board_space.type == "payday": self.cash += 100000 await game.add_action( "{} {} landed on a payday and got a $100,000 bonus!".format( PAYDAY_BONUS, self.get_name() ) ) # Sleep for 3 seconds so everyone can read what happened await sleep(3) return False async def ask_for_spin(self, game, *, is_color = False, allow_leave = False): # Check if the player is an AI, simulate waiting to spin if self.is_ai: await sleep(2) # The player is a real person, wait for their reaction to spin else: # Send the message and add the valid reactions message = await game.ctx.send( embed = Embed( title = "Spin!", description = "{}, react with {} when you're ready to spin.{}".format( self.get_name(), SPIN, "\nIf you'd like to leave, react with {}".format( LEAVE ) if allow_leave else "" ), colour = await get_embed_color(self.member) ) ) await message.add_reaction(SPIN) if allow_leave: await message.add_reaction(LEAVE) # Wait for the user's reaction def check_reaction(reaction, user): return ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in ( [SPIN, LEAVE] if allow_leave else [SPIN] ) ) reaction, user = await 
game.bot.wait_for("reaction_add", check = check_reaction) await message.delete() if str(reaction) == LEAVE: return LEAVE number = None for value in range(randint(1, 10)): number = randint(1, 10) is_black = number % 2 == 0 if is_color: return is_black return number async def ask_for_career(self, game, *, new_career = False): career_cards = game.career_cards if not self.is_college else game.college_career_cards if not new_career: career_one = career_cards.pop(randint(0, len(career_cards) - 1)) career_two = career_cards.pop(randint(0, len(career_cards) - 1)) else: career_one = self.career career_two = career_cards.pop(randint(0, len(career_cards) - 1)) if self.is_ai: await sleep(2) return career_one if randint(1, 10) % 2 == 0 else career_two else: await game.ctx.send( embed = Embed( title = "Choose a Career!", description = "Check your DMs for your career choices!", colour = await get_embed_color(self.member) ), delete_after = 5 ) message = await self.member.send( embed = Embed( title = "Choose a Career!", description = "_ _", colour = await get_embed_color(self.member) ).add_field( name = NUMBER_EMOJIS[0], value = str(career_one), inline = False ).add_field( name = NUMBER_EMOJIS[1], value = str(career_two), inline = False ) ) await message.add_reaction(NUMBER_EMOJIS[0]) await message.add_reaction(NUMBER_EMOJIS[1]) def check_reaction(reaction, user): return ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in NUMBER_EMOJIS[ : 2] ) reaction, user = await game.bot.wait_for("reaction_add", check = check_reaction) if str(reaction) == NUMBER_EMOJIS[0]: return career_one return career_two async def ask_for_house(self, game, *, sell_house = False, house = None): action = None if not sell_house else "sell" if not sell_house: if self.is_ai: actions = ["buy", "sell", "nothing"] action = choice(actions) while action == "sell" and len(self.house_cards) == 0: action = choice(actions) await sleep(2) else: message = await game.ctx.send( embed = 
Embed( title = "Buy, Sell, or do nothing?", description = "{}{}{}".format( "If you want to buy a house, react with {}\n".format(BUY_HOUSE), "If you want to sell a house, react with {}\n".format(SELL_HOUSE) if len(self.house_cards) > 0 else "", "If you want to do nothing, react with {}".format(DO_NOTHING) ), colour = await get_embed_color(self.member) ) ) await message.add_reaction(BUY_HOUSE) if len(self.house_cards) > 0: await message.add_reaction(SELL_HOUSE) await message.add_reaction(DO_NOTHING) def check_reaction(reaction, user): return ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in ( [BUY_HOUSE, SELL_HOUSE, DO_NOTHING] if len(self.house_cards) > 0 else [BUY_HOUSE, DO_NOTHING] ) ) reaction, user = await game.bot.wait_for("reaction_add", check = check_reaction) if str(reaction) == BUY_HOUSE: action = "buy" elif str(reaction) == SELL_HOUSE: action = "sell" else: action = "nothing" await message.delete() if action == "buy": take_loans = False chosen_house = None house_cards = game.house_cards house_one = house_cards.pop(0) house_two = house_cards.pop(0) if self.is_ai: chosen_house = choose_house(self, house_one = house_one, house_two = house_two) take_loans = chosen_house != None else: message = await self.member.send( embed = Embed( title = "Choose a house!", description = "_ _", colour = await get_embed_color(self.member) ).add_field( name = NUMBER_EMOJIS[0], value = str(house_one), inline = False ).add_field( name = NUMBER_EMOJIS[1], value = str(house_two), inline = False ) ) await message.add_reaction(NUMBER_EMOJIS[0]) await message.add_reaction(NUMBER_EMOJIS[1]) reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in NUMBER_EMOJIS[ : 2] )) if str(reaction) == NUMBER_EMOJIS[0]: chosen_house = house_one elif str(reaction) == NUMBER_EMOJIS[1]: chosen_house = house_two await message.delete() if 
chosen_house.purchase <= self.cash: if str(reaction) == NUMBER_EMOJIS[0]: house_cards.append(house_two) else: house_cards.append(house_one) take_loans = True else: take_loans = await self.ask_for_split_path( game, title = "Loans Needed", description = ( """ In order to buy that house, you need to take out some loans. If you want to take loans out, react with {}. If you want to cancel buying the house, react with {}. """ ), true_path = LOANS, false_path = LEAVE ) if take_loans: loans_needed = ceil((chosen_house.purchase - self.cash) / 50000) if loans_needed > 0: self.loans += loans_needed self.cash += 50000 * loans_needed self.cash -= chosen_house.purchase self.house_cards.append(chosen_house) await game.add_action( "{} {} bought a new house{}!\n{}".format( HOUSE, self.get_name(), " after taking out some loans" if loans_needed > 0 else "", str(chosen_house) ) ) else: house_cards.append(house_one) house_cards.append(house_two) await game.add_action( "{} {} did not want to take any loans! They will not buy the house.".format( ACTION, self.get_name() ) ) elif action == "sell": await game.add_action( "{} {} is selling a house!".format( HOUSE, self.get_name() ) ) chosen_house = house if not sell_house: if self.is_ai: chosen_house = choose_house(self, buy = False) self.house_cards.remove(chosen_house) else: embed = Embed( title = "Choose a house to sell!", description = "_ _", colour = await get_embed_color(self.member) ) for index in range(len(self.house_cards)): house = self.house_cards[index] embed.add_field( name = NUMBER_EMOJIS[index], value = str(house), inline = False ) message = await self.member.send(embed = embed) for emoji in NUMBER_EMOJIS[ : len(self.house_cards)]: await message.add_reaction(emoji) reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.member.id and str(reaction) in NUMBER_EMOJIS[ : len(self.house_cards)] )) await message.delete() chosen_house = 
self.house_cards.pop(NUMBER_EMOJIS.index(str(reaction))) await game.add_action( "{} {} is selling the following house:\n{}".format( HOUSE, self.get_name(), str(chosen_house) ) ) is_black = await self.ask_for_spin(game, is_color = True) amount = chosen_house.spin_black if is_black else chosen_house.spin_red self.cash += amount await game.add_action( "{} {} sold their house for ${:0,}".format( GET_MONEY, self.get_name(), amount ) ) return amount else: await game.add_action( "{} {} chose not to buy nor sell a house.".format( ACTION, self.get_name() ) ) async def ask_for_opponent(self, game, *, is_lawsuit = False): if self.is_ai: opponent = max(game.players, key = lambda player: ( player.career.salary if (player.career and player.id != self.id) else 0 )) while opponent.id == self.id: opponent = choice(game.players) await sleep(2) else: embed = Embed( title = "Choose an opponent!", description = "_ _", colour = await get_embed_color(self.member) ) opponents = list(filter(lambda player: self.id != player.id, game.players)) for index in range(len(opponents)): embed.add_field( name = NUMBER_EMOJIS[index], value = opponents[index].get_name(), inline = False ) # number emoji reactions for this player to react to # in order to choose an opponent message = await game.ctx.send(embed = embed) for emoji in NUMBER_EMOJIS[ : len(opponents)]: await message.add_reaction(emoji) # Wait for the player to react with which opponent they want to choose # and then delete the message reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.id and str(reaction) in NUMBER_EMOJIS[ : len(opponents)] )) await message.delete() # Get the opponent that they chose opponent = opponents[NUMBER_EMOJIS.index(str(reaction))] # If this is not for a lawsuit, update the turn message in the game if not is_lawsuit: await game.add_action( "{} {} chose {}!".format( ACTION, self.get_name(), opponent.get_name() ) ) return 
opponent async def ask_for_spot(self, game, message, spots, *, choose_from = 1): # Let the player decide on how many spots to take for spot_choice in range(choose_from): # Check if this player is an AI, have them choose a random spot after # sleeping for 2 seconds to simulate a decision if self.is_ai: await sleep(2) spot = choice(NUMBER_EMOJIS) while str(spot) in spots: spot = choice(NUMBER_EMOJIS) # The player is a real person, have them decide on a spot as long as it's not taken yet else: embed = Embed( title = "Spin to Win!", description = "{}, choose your spot{}!".format( self.get_name(), "s" if choose_from - spot_choice > 1 else "" ), colour = PRIMARY_EMBED_COLOR if game.get_current_player().is_ai else await get_embed_color(game.get_current_player().member) ).add_field( name = "Spots Taken", value = "\n".join([ "{} - {}".format( str(spot), spots[spot].get_name() ) for spot in spots ]) if len(spots) > 0 else "None Taken Yet!", inline = False ) await message.edit(embed = embed) for emoji in NUMBER_EMOJIS: if str(emoji) not in spots: await message.add_reaction(emoji) reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.id and str(reaction) in NUMBER_EMOJIS and str(reaction) not in spots )) await message.clear_reactions() spot = str(reaction) # Add the player's spot to the spots dictionary spots[spot] = self return spots async def process_pet_space(self, game): card = game.pet_cards.pop(0) await game.add_action(str(card)) self.pet_cards += 1 # Check if the player is collecting money if card.action == "collect": # Give the player money and update the turn message self.cash += card.amount await game.add_action( "{} {} collected ${:0,}".format( GET_MONEY, self.get_name(), card.amount ) ) # Check if the player is collecting money for each pet elif card.action == "collect_for_each": # Give the player money for as many pets as they have # and update the turn message total = 
self.pets * card.amount self.cash += total await game.add_action( "{} {} collected ${:0,}{}!".format( GET_MONEY, self.get_name(), card.amount, " for each pet for a total of ${:0,}".format(total) if total != card.amount else "" ) ) # Check if the player is collecting money from each player elif card.action == "collect_from_each": # Take money from each player that is not this player # and give it to this player total = (len(game.players) - 1) * card.amount for player in game.players: if player.id != self.id: player.cash -= card.amount # Update the turn message await game.add_action( "{} {} collected ${:0,}{}!".format( GET_MONEY, self.get_name(), card.amount, " from everyone for a total of ${:0,}".format(total) if total != card.amount else "" ) ) # Check if the player is paying money elif card.action == "pay": # Take money from the player and update the turn message self.cash -= card.amount await game.add_action( "{} {} had to pay the bank ${:0,}!".format( PAY_MONEY, self.get_name(), card.amount ) ) # Check if the player is paying money for each pet elif card.action == "pay_for_each": # Take money from the player for each pet they have # and update the turn message total = self.pets * card.amount self.cash -= total await game.add_action( "{} {} had to pay the bank ${:0,}{}!".format( PAY_MONEY, self.get_name(), card.amount, " for each pet for a total of ${:0,}".format(total) if total != card.amount else "" ) ) # Check if the player is competing against another player elif card.action == "compete": # Have two players compete against each other and give the # winning player the amount on this card # then update the turn message winner, _ = await game.compete(self) winner.cash += card.amount await game.add_action( "{} {} collected ${:0,} for spinning higher!".format( GET_MONEY, self.get_name(), card.amount ) ) async def process_stop_space(self, game, board_space): # Check if the player is graduating, ask them to choose a career if board_space.type == "graduation": 
self.is_college = True self.career = await self.ask_for_career(game) await game.add_action( "{} {} graduated and chose a career!\n{}".format( GRADUATION, self.get_name(), str(self.career) ) ) # Check if the player is getting married elif board_space.type == "married": # Send a message saying the player got married await game.add_action( "{} {} got married!\n{}, spin for gifts from everyone!".format( MARRIED, self.get_name(), GIFTS, self.get_name() ) ) # Ask the player to spin for gifs from everyone (apart from this player) # and have each player give the gift amount # depending on the color is_black = await self.ask_for_spin(game, is_color = True) amount = 100000 if is_black else 50000 total = amount * (len(game.players) - 1) for player in filter(lambda player: player.id != self.id, game.players): player.cash -= amount self.cash += total # Update the turn message saying how much money the player got in total await game.add_action( "{} {} collected ${:0,}{}!".format( GET_MONEY, self.get_name(), amount, " from everyone for a total of ${:0,}".format( total ) if total != amount else "" ) ) # Check if the player is spinning for babies elif board_space.type == "spin_for_babies": # Update the turn message saying the player is spinning for babies await game.add_action( "{} {} is spinning to see if they have any more babies!".format( BABY, self.get_name() ) ) # Ask the player to spin to see how many babies they get # and update the turn message value = await self.ask_for_spin(game) self.babies += board_space.spin[str(value)] await game.add_action( "{} {} had {} bab{}!".format( BABY, self.get_name(), board_space.spin[str(value)], "ies" if board_space.spin[str(value)] != 1 else "y" ) ) # Check if the player is deciding on night school elif board_space.type == "night_school": # Check if the player is an AI # sleep for 2 seconds to simulate a decision night_school = False if self.is_ai: night_school = randint(1, 10) % 2 == 0 await sleep(2) # The player is a real person, let 
them decide else: # Send the message asking the player to decide night_school = await self.ask_for_split_path( game, title = "Night School?", description = ( """ If you want to go to Night School, react with {}. If you don't want to go, react with {}. """ ), true_path = GRADUATION, false_path = SPIN ) self.move_modify = night_school if night_school: self.is_college = True self.cash -= 100000 await game.add_action( "{} {} had to pay ${:0,} to go to Night School!".format( PAY_MONEY, self.get_name(), 100000 ) ) self.career = await self.ask_for_career(game, new_career = True) else: await game.add_action( "{} {} chose not to go to Night School!".format( ACTION, self.get_name() ) ) elif board_space.type == "family_path": family_path = False if self.is_ai: family_path = randint(1, 10) % 2 == 0 await sleep(2) else: family_path = await self.ask_for_split_path( game, title = "Family Path?", description = ( """ If you want to go down the Family Path, react with {}. If you don't want to, react with {}. """ ), true_path = FAMILY, false_path = SPIN ) # Update the turn message about the player's decision self.move_modify = family_path await game.add_action( "{} {} is{} going down the Family Path!".format( FAMILY if family_path else ACTION, self.get_name(), "" if family_path else " not" ) ) elif board_space.type == "risky_road": risky_road = False if self.is_ai: risky_road = randint(1, 10) % 2 == 0 await sleep(2) else: risky_road = await self.ask_for_split_path( game, title = "Risky Road?", description = ( """ If you want to go down the Risky Road, react with {}. If you don't want to, react with {}. 
""" ), true_path = RISKY_ROAD, false_path = SPIN ) # Update the turn message about the player's decision self.move_modify = risky_road await game.add_action( "{} {} is{} going down the Risky Road!".format( RISKY_ROAD if risky_road else ACTION, self.get_name(), "" if risky_road else " not" ) ) elif board_space.type == "retirement": amount = 100000 * (5 - len(game.get_retired())) self.cash += amount self.is_retired = True await game.add_action( "{} {} has retired and collected ${:0,}".format( RETIRED, self.get_name(), amount ) ) if board_space.type != "retirement": self.extra_turn = True async def process_baby_space(self, game, board_space): if board_space.type == "baby": description = "{} {} had a baby!" self.babies += 1 elif board_space.type == "twins": description = "{} {} had twins!" self.babies += 2 elif board_space.type == "triplets": description = "{} {} had triplets!" self.babies += 3 await game.add_action(description.format(BABY, self.get_name())) async def ask_for_split_path(self, game, *, title = None, description = None, true_path = None, false_path = None): message = await game.ctx.send( embed = Embed( title = title, description = description.format(true_path, false_path), colour = await get_embed_color(self.member) ) ) await message.add_reaction(true_path) await message.add_reaction(false_path) reaction, user = await game.bot.wait_for("reaction_add", check = lambda reaction, user: ( reaction.message.id == message.id and user.id == self.id and str(reaction) in [true_path, false_path] )) await message.delete() return str(reaction) == true_path
true
true
1c45c55d868ffd36fb6e4d51f703e1ffad0a1d37
12,262
py
Python
apps/usuario/view/views_perfil.py
Ajerhy/proyectosigetebr
5b63f194bbe06adb92d1cdbba93d1e0028b4164f
[ "MIT" ]
1
2020-05-11T13:29:41.000Z
2020-05-11T13:29:41.000Z
apps/usuario/view/views_perfil.py
Ajerhy/proyectosigetebr
5b63f194bbe06adb92d1cdbba93d1e0028b4164f
[ "MIT" ]
11
2020-02-12T03:19:44.000Z
2022-03-12T00:10:31.000Z
apps/usuario/view/views_perfil.py
Ajerhy/proyectosigetebr
5b63f194bbe06adb92d1cdbba93d1e0028b4164f
[ "MIT" ]
null
null
null
from django.shortcuts import redirect, render from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.messages.views import SuccessMessageMixin from django.views.generic import (CreateView, UpdateView, DetailView, TemplateView, View, DeleteView,ListView) from django.shortcuts import render, redirect, get_object_or_404 from django.http import (HttpResponseRedirect,JsonResponse, HttpResponse,Http404) from django.contrib import messages from django.contrib.auth.hashers import check_password from django.contrib.auth import authenticate from django.contrib.auth import login as login_django from django.contrib.auth import logout as logout_django from django.contrib.auth.decorators import login_required from django.contrib.auth import update_session_auth_hash from apps.usuario.templatetags.utils import get_ip from django.urls import reverse_lazy, reverse from django.contrib.auth.decorators import login_required import json from apps.usuario.form.forms_perfil import LoginUsuarioPerfilForm,\ PasswordUsuarioPerfilForm,EditarUsuarioPerfilForm,\ PerfilFrom from django.db.models import Q from apps.usuario.models import Perfil from apps.contrato.models import Persona from apps.contrato.models import Cliente from apps.terreno.models import Manzano,Lote #Login class LoginPerfilView(TemplateView,LoginRequiredMixin): login_url = 'usuario:index' template_name = "sigetebr/apps/usuario/index.html"#url success_url = reverse_lazy("usuario:dashboard")#ur def get_context_data(self, **kwargs): context = super(LoginPerfilView, self).get_context_data(**kwargs) return context def dispatch(self, request, *args, **kwargs): if request.user.is_authenticated: return HttpResponseRedirect(self.success_url) return super(LoginPerfilView, self).dispatch(request, *args, **kwargs) def post(self, request, *args, **kwargs): form = LoginUsuarioPerfilForm(request.POST, request=request) if form.is_valid(): #user = Perfil.objects.filter(usuario=request.POST.get('usuario')).first() perfil = 
Perfil.objects.filter(usuario=request.POST.get('usuario')).first() if perfil is not None: if perfil.estado: perfil = authenticate( usuario=request.POST.get('usuario'), password=request.POST.get('password')) if perfil is not None: login_django(request, perfil) return redirect('usuario:dashboard') #return HttpResponseRedirect('usuarios:dashboard') return render(request, self.template_name, { "error": True, "message": "Tu nombre de usuario y contraseña no coinciden. Inténtalo de nuevo."} ) return render(request, self.template_name, { "error": True, "message": "Su cuenta está inactiva. Por favor, póngase en contacto con el administrador"} ) return render(request, self.template_name, { "error": True, "message": "Tu cuenta no se encuentra. Por favor, póngase en contacto con el administrador"} ) return render(request, self.template_name, { # "error": True, # "message": "Tu nombre de Usuario y Contraseña no coinciden. Inténtalo de nuevo." "form": form }) #Dashboard class DashboardView(LoginRequiredMixin,TemplateView): template_name = 'sigetebr/apps/dashboard.html' login_url = 'usuario:index' def get_context_data(self, **kwargs): context = super(DashboardView, self).get_context_data(**kwargs) manzanostodo = Manzano.objects.all() manzanosactiva = Manzano.objects.exclude(estado='False') context["manzanos"] = manzanostodo context["manzano_count"] = manzanosactiva lotestodo = Lote.objects.all() lotesactiva = Lote.objects.exclude(estado='False') context["lotes"] = lotestodo context["lote_count"] = lotesactiva usuariotodo = Perfil.objects.all() usuariodmin = Perfil.objects.exclude(is_superuser='True') usuarioactiva = Perfil.objects.exclude(is_active='True') context["usuario_count"] = usuarioactiva context["usuarios"] = usuariotodo personatodo = Persona.objects.all() personaactiva = Persona.objects.exclude(estado='False') context["persona_count"] = personaactiva context["personas"] = personatodo clientetodo = Cliente.objects.all() clienteactiva = 
Cliente.objects.exclude(estado='False') context["cliente_count"] = clienteactiva context["clientes"] = clientetodo return context """ Funciones """ #Salir @login_required(login_url='usuario:index') def LogoutView(request): logout_django(request) return redirect('usuario:index') #Usuario Perfil Usuario class UsuarioPerfilDetalleView(LoginRequiredMixin,DetailView): model = Perfil template_name = 'sigetebr/apps/usuario/configuracion/perfil_usuario.html' # url slug_field = 'usuario'#que campo de la base de datos slug_url_kwarg = 'usuario_url'#que campo de la url login_url = 'usuarios:index' #Usuario Perfil Actualizar Usuario class UsuarioPerfilEditarView(SuccessMessageMixin,LoginRequiredMixin,UpdateView): model = Perfil form_class = EditarUsuarioPerfilForm template_name = 'sigetebr/apps/usuario/configuracion/perfil_form.html' # url success_url = reverse_lazy('usuarios:perfil_actualizar') # success_message = "Tu usuario ha sido actualizado" context_object_name = "user_obj" login_url = 'usuarios:index' def form_valid(self, form): messages.success(self.request, "Tu Perfil Usuario ha sido actualizado") return super(UsuarioPerfilEditarView, self).form_valid(form) def get_object(self, queryset=None): return self.request.user #Usuario Perfil Actualizar Password Usuario @login_required(login_url='usuarios:index') def passwordusuarioview(request): template_name = 'sigetebr/apps/usuario/configuracion/perfil_password.html' # url form = PasswordUsuarioPerfilForm(request.POST or None) if request.method == 'POST': if form.is_valid(): actual = request.POST.get('password') nuevo = request.POST.get('password') confirma =request.POST.get('confimar_password') print(actual) print(nuevo) print(confirma) if not check_password(request.POST.get('password'), request.user.password): messages.warning(request, 'Password Actual no coinciden!') else: if authenticate(usuario = request.user.usuario,password = request.POST.get('password')): request.user.set_password(request.POST.get('new_password')) 
request.user.save() update_session_auth_hash(request, request.user) messages.success(request, 'Password Actualizado!') #redirect() else: messages.error(request, 'Verifique su Password por favor!') context = {'form': form} return render(request, template_name, context) USUARIO_FIELDS = [ {'string': 'N°', 'field': 'numero'}, {'string': 'Usuario', 'field': 'usuario'}, {'string': 'Nombres', 'field': 'nombre'}, {'string': 'Email', 'field': 'email'}, {'string': 'Roles', 'field': 'roles'}, {'string': 'Estado', 'field': 'estado'}, {'string': 'Acciones', 'field': 'acciones'}, ] #class PerfilListarView(LoginRequiredMixin,generic.ListView): class PerfilListarView(LoginRequiredMixin,TemplateView): model = Perfil template_name = "sigetebr/apps/usuario/perfil/listar.html" #context_object_name = "list_usuario" login_url = 'usuario:index' def get_queryset(self): queryset = self.model.objects.all() request_post = self.request.POST print(request_post,"Usuario") if request_post: if request_post.get('usuario'): queryset = queryset.filter( usuario__icontains=request_post.get('usuario')) if request_post.get('email'): queryset = queryset.filter( email__icontains=request_post.get('email')) print(queryset, "Resultado") return queryset def get_context_data(self, **kwargs): context = super(PerfilListarView, self).get_context_data(**kwargs) context["list_perfil"] = self.get_queryset() context['fields'] = USUARIO_FIELDS context["per_page"] = self.request.POST.get('per_page') search = False if ( self.request.POST.get('usuario') or self.request.POST.get('email') ): search = True context["search"] = search return context def post(self, request, *args, **kwargs): context = self.get_context_data(**kwargs) return self.render_to_response(context) #Perfil Crear class PerfilCrearView(SuccessMessageMixin,LoginRequiredMixin,CreateView): model = Perfil template_name = "sigetebr/apps/usuario/perfil/form.html" context_object_name = "obj" form_class = PerfilFrom success_url = 
reverse_lazy("usuario:listar_perfil") success_message = "Perfil de Usuario Creado Exitosamente" login_url = 'usuario:index' #Perfil Editar class PerfilEditarView(SuccessMessageMixin,LoginRequiredMixin,UpdateView): model = Perfil template_name = "sigetebr/apps/usuario/perfil/form.html" context_object_name = "obj_usuario" form_class = PerfilFrom success_url = reverse_lazy("usuario:listar_perfil") success_message = "Perfil de Usuario Actualizada Satisfactoriamente" login_url = 'usuario:index' #Perfil Detalle class PerfilDetallesView(LoginRequiredMixin,DetailView): model = Perfil template_name = 'sigetebr/apps/usuario/perfil/detalle.html'#url slug_field = 'usuario'#que campo de la base de datos context_object_name = 'obj' slug_url_kwarg = 'usuario_url'#que campo de la url login_url = 'usuario:index' #Perfil Eliminar class PerfilEliminarView(SuccessMessageMixin,LoginRequiredMixin,DeleteView): model = Perfil template_name='sigetebr/apps/usuario/perfil/eliminar.html' context_object_name='obj' success_url = reverse_lazy("usuario:listar_perfil") success_message="Perfil de Usuario Eliminada Exitosamente" login_url = 'usuario:index' #Desactivar @login_required(login_url='usuario:index') def perfildesactivar(request, id): perfil = Perfil.objects.filter(pk=id).first() contexto={} template_name = 'sigetebr/apps/usuario/perfil/estado_desactivar.html'#url if not perfil: return redirect('usuario:listar_perfil') if request.method=='GET': contexto={'obj':perfil} if request.method=='POST': perfil.estado=False perfil.save() return redirect('usuario:listar_perfil') return render(request,template_name,contexto) #Activar @login_required(login_url='usuario:index') def perfilactivar(request, id): perfil = Perfil.objects.filter(pk=id).first() contexto={} template_name = 'sigetebr/apps/usuario/perfil/estado_activar.html'#url if not perfil: return redirect('usuario:listar_perfil') if request.method=='GET': contexto={'obj':perfil} if request.method=='POST': perfil.estado=True perfil.save() 
return redirect('usuario:listar_perfil') return render(request,template_name,contexto) #Estado @login_required(login_url='usuario:index') def cambiar_estado_perfil(request, pk): perfil = get_object_or_404(Perfil, pk=pk) if perfil.estado: perfil.estado = False messages.error(request, "Perfil de Usuario Desactivada") else: perfil.estado = True messages.success(request, "Perfil de Usuario Activada") perfil.um = request.user.id perfil.save() return redirect('usuario:listar_perfil')
38.438871
110
0.676562
from django.shortcuts import redirect, render from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.messages.views import SuccessMessageMixin from django.views.generic import (CreateView, UpdateView, DetailView, TemplateView, View, DeleteView,ListView) from django.shortcuts import render, redirect, get_object_or_404 from django.http import (HttpResponseRedirect,JsonResponse, HttpResponse,Http404) from django.contrib import messages from django.contrib.auth.hashers import check_password from django.contrib.auth import authenticate from django.contrib.auth import login as login_django from django.contrib.auth import logout as logout_django from django.contrib.auth.decorators import login_required from django.contrib.auth import update_session_auth_hash from apps.usuario.templatetags.utils import get_ip from django.urls import reverse_lazy, reverse from django.contrib.auth.decorators import login_required import json from apps.usuario.form.forms_perfil import LoginUsuarioPerfilForm,\ PasswordUsuarioPerfilForm,EditarUsuarioPerfilForm,\ PerfilFrom from django.db.models import Q from apps.usuario.models import Perfil from apps.contrato.models import Persona from apps.contrato.models import Cliente from apps.terreno.models import Manzano,Lote class LoginPerfilView(TemplateView,LoginRequiredMixin): login_url = 'usuario:index' template_name = "sigetebr/apps/usuario/index.html" success_url = reverse_lazy("usuario:dashboard") def get_context_data(self, **kwargs): context = super(LoginPerfilView, self).get_context_data(**kwargs) return context def dispatch(self, request, *args, **kwargs): if request.user.is_authenticated: return HttpResponseRedirect(self.success_url) return super(LoginPerfilView, self).dispatch(request, *args, **kwargs) def post(self, request, *args, **kwargs): form = LoginUsuarioPerfilForm(request.POST, request=request) if form.is_valid(): perfil = Perfil.objects.filter(usuario=request.POST.get('usuario')).first() if perfil is not 
None: if perfil.estado: perfil = authenticate( usuario=request.POST.get('usuario'), password=request.POST.get('password')) if perfil is not None: login_django(request, perfil) return redirect('usuario:dashboard') return render(request, self.template_name, { "error": True, "message": "Tu nombre de usuario y contraseña no coinciden. Inténtalo de nuevo."} ) return render(request, self.template_name, { "error": True, "message": "Su cuenta está inactiva. Por favor, póngase en contacto con el administrador"} ) return render(request, self.template_name, { "error": True, "message": "Tu cuenta no se encuentra. Por favor, póngase en contacto con el administrador"} ) return render(request, self.template_name, { "form": form }) class DashboardView(LoginRequiredMixin,TemplateView): template_name = 'sigetebr/apps/dashboard.html' login_url = 'usuario:index' def get_context_data(self, **kwargs): context = super(DashboardView, self).get_context_data(**kwargs) manzanostodo = Manzano.objects.all() manzanosactiva = Manzano.objects.exclude(estado='False') context["manzanos"] = manzanostodo context["manzano_count"] = manzanosactiva lotestodo = Lote.objects.all() lotesactiva = Lote.objects.exclude(estado='False') context["lotes"] = lotestodo context["lote_count"] = lotesactiva usuariotodo = Perfil.objects.all() usuariodmin = Perfil.objects.exclude(is_superuser='True') usuarioactiva = Perfil.objects.exclude(is_active='True') context["usuario_count"] = usuarioactiva context["usuarios"] = usuariotodo personatodo = Persona.objects.all() personaactiva = Persona.objects.exclude(estado='False') context["persona_count"] = personaactiva context["personas"] = personatodo clientetodo = Cliente.objects.all() clienteactiva = Cliente.objects.exclude(estado='False') context["cliente_count"] = clienteactiva context["clientes"] = clientetodo return context @login_required(login_url='usuario:index') def LogoutView(request): logout_django(request) return redirect('usuario:index') class 
UsuarioPerfilDetalleView(LoginRequiredMixin,DetailView): model = Perfil template_name = 'sigetebr/apps/usuario/configuracion/perfil_usuario.html' slug_field = 'usuario' slug_url_kwarg = 'usuario_url' login_url = 'usuarios:index' class UsuarioPerfilEditarView(SuccessMessageMixin,LoginRequiredMixin,UpdateView): model = Perfil form_class = EditarUsuarioPerfilForm template_name = 'sigetebr/apps/usuario/configuracion/perfil_form.html' success_url = reverse_lazy('usuarios:perfil_actualizar') context_object_name = "user_obj" login_url = 'usuarios:index' def form_valid(self, form): messages.success(self.request, "Tu Perfil Usuario ha sido actualizado") return super(UsuarioPerfilEditarView, self).form_valid(form) def get_object(self, queryset=None): return self.request.user @login_required(login_url='usuarios:index') def passwordusuarioview(request): template_name = 'sigetebr/apps/usuario/configuracion/perfil_password.html' form = PasswordUsuarioPerfilForm(request.POST or None) if request.method == 'POST': if form.is_valid(): actual = request.POST.get('password') nuevo = request.POST.get('password') confirma =request.POST.get('confimar_password') print(actual) print(nuevo) print(confirma) if not check_password(request.POST.get('password'), request.user.password): messages.warning(request, 'Password Actual no coinciden!') else: if authenticate(usuario = request.user.usuario,password = request.POST.get('password')): request.user.set_password(request.POST.get('new_password')) request.user.save() update_session_auth_hash(request, request.user) messages.success(request, 'Password Actualizado!') else: messages.error(request, 'Verifique su Password por favor!') context = {'form': form} return render(request, template_name, context) USUARIO_FIELDS = [ {'string': 'N°', 'field': 'numero'}, {'string': 'Usuario', 'field': 'usuario'}, {'string': 'Nombres', 'field': 'nombre'}, {'string': 'Email', 'field': 'email'}, {'string': 'Roles', 'field': 'roles'}, {'string': 'Estado', 'field': 
'estado'}, {'string': 'Acciones', 'field': 'acciones'}, ] class PerfilListarView(LoginRequiredMixin,TemplateView): model = Perfil template_name = "sigetebr/apps/usuario/perfil/listar.html" login_url = 'usuario:index' def get_queryset(self): queryset = self.model.objects.all() request_post = self.request.POST print(request_post,"Usuario") if request_post: if request_post.get('usuario'): queryset = queryset.filter( usuario__icontains=request_post.get('usuario')) if request_post.get('email'): queryset = queryset.filter( email__icontains=request_post.get('email')) print(queryset, "Resultado") return queryset def get_context_data(self, **kwargs): context = super(PerfilListarView, self).get_context_data(**kwargs) context["list_perfil"] = self.get_queryset() context['fields'] = USUARIO_FIELDS context["per_page"] = self.request.POST.get('per_page') search = False if ( self.request.POST.get('usuario') or self.request.POST.get('email') ): search = True context["search"] = search return context def post(self, request, *args, **kwargs): context = self.get_context_data(**kwargs) return self.render_to_response(context) class PerfilCrearView(SuccessMessageMixin,LoginRequiredMixin,CreateView): model = Perfil template_name = "sigetebr/apps/usuario/perfil/form.html" context_object_name = "obj" form_class = PerfilFrom success_url = reverse_lazy("usuario:listar_perfil") success_message = "Perfil de Usuario Creado Exitosamente" login_url = 'usuario:index' class PerfilEditarView(SuccessMessageMixin,LoginRequiredMixin,UpdateView): model = Perfil template_name = "sigetebr/apps/usuario/perfil/form.html" context_object_name = "obj_usuario" form_class = PerfilFrom success_url = reverse_lazy("usuario:listar_perfil") success_message = "Perfil de Usuario Actualizada Satisfactoriamente" login_url = 'usuario:index' class PerfilDetallesView(LoginRequiredMixin,DetailView): model = Perfil template_name = 'sigetebr/apps/usuario/perfil/detalle.html' slug_field = 'usuario' context_object_name = 'obj' 
slug_url_kwarg = 'usuario_url' login_url = 'usuario:index' class PerfilEliminarView(SuccessMessageMixin,LoginRequiredMixin,DeleteView): model = Perfil template_name='sigetebr/apps/usuario/perfil/eliminar.html' context_object_name='obj' success_url = reverse_lazy("usuario:listar_perfil") success_message="Perfil de Usuario Eliminada Exitosamente" login_url = 'usuario:index' @login_required(login_url='usuario:index') def perfildesactivar(request, id): perfil = Perfil.objects.filter(pk=id).first() contexto={} template_name = 'sigetebr/apps/usuario/perfil/estado_desactivar.html' if not perfil: return redirect('usuario:listar_perfil') if request.method=='GET': contexto={'obj':perfil} if request.method=='POST': perfil.estado=False perfil.save() return redirect('usuario:listar_perfil') return render(request,template_name,contexto) @login_required(login_url='usuario:index') def perfilactivar(request, id): perfil = Perfil.objects.filter(pk=id).first() contexto={} template_name = 'sigetebr/apps/usuario/perfil/estado_activar.html' if not perfil: return redirect('usuario:listar_perfil') if request.method=='GET': contexto={'obj':perfil} if request.method=='POST': perfil.estado=True perfil.save() return redirect('usuario:listar_perfil') return render(request,template_name,contexto) @login_required(login_url='usuario:index') def cambiar_estado_perfil(request, pk): perfil = get_object_or_404(Perfil, pk=pk) if perfil.estado: perfil.estado = False messages.error(request, "Perfil de Usuario Desactivada") else: perfil.estado = True messages.success(request, "Perfil de Usuario Activada") perfil.um = request.user.id perfil.save() return redirect('usuario:listar_perfil')
true
true
1c45c58302360fe1ea1256f259b08d194601aee0
9,269
py
Python
spookbot.py
carsuki/discord-spookbot
a6bd5b7e80860d7db65f3eb634bab68b9d4c50f1
[ "BSD-3-Clause" ]
1
2021-10-01T13:44:05.000Z
2021-10-01T13:44:05.000Z
spookbot.py
carsuki/discord-spookbot
a6bd5b7e80860d7db65f3eb634bab68b9d4c50f1
[ "BSD-3-Clause" ]
null
null
null
spookbot.py
carsuki/discord-spookbot
a6bd5b7e80860d7db65f3eb634bab68b9d4c50f1
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 import asyncio import discord import logging import json import random logger = logging.getLogger('spookbot') logger.setLevel(logging.INFO) handler = logging.FileHandler(filename='spookbot.log', mode='w', encoding='utf-8') handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')) logger.addHandler(handler) with open("auth.json") as auth: auth = json.load(auth) client = discord.Client() commands = {} def command(func): commands[func.__name__] = func return func @client.event async def on_message(message): for command in commands.keys(): if message.content.startswith('.' + command): logger.info('Command `%s` called from message: %s', command, message.content) await commands[command](message) # {{{ Commands @command async def doot(message): await message.channel.send(message.author.mention + ' doot doot') @command async def funnycatchphrase(message): messages = ["Doot", "AssFuck", "Shit Bruh"] await message.channel.send(random.choice(messages)) @command async def calciumfix(message): messages = ["https://mbtskoudsalg.com/images/transparent-stuff-vaporwave-3.gif", "https://media.giphy.com/media/3ohhwqrNt7rd9yuj7O/source.gif", "https://tenor.com/view/waiting-skeleton-gif-6159814", "https://tenor.com/view/waiting-gif-9030040", "https://tenor.com/view/iphone-skeleton-gif-5452826", "https://tenor.com/view/skeleton-waiting-eating-bored-playing-around-gif-14558363", "https://tenor.com/view/shaking-skeleton-skeletons-gif-4757109", "https://tenor.com/view/skeleton-waiting-keyboard-bored-life-gif-14558359", "https://tenor.com/view/skeleton-tea-gif-10625213", "https://tenor.com/view/skeleton-ruby-swipe-hearts-gif-10625183", "https://media3.giphy.com/media/26BRDDhIt8oiyEjS0/source.gif", "https://media3.giphy.com/media/3o7TKpmHsAZiTTekve/source.gif", "https://media0.giphy.com/media/3o7TKqNtiUdqSfB6EM/source.gif", "https://media0.giphy.com/media/l3fQ6Fh6Ze3rMXn4A/source.gif", 
"https://media2.giphy.com/media/3o7TKJNbIxU09eccuI/source.gif", "https://media0.giphy.com/media/26BRxmqeqsRPBBOpy/source.gif", "https://media1.giphy.com/media/l46CpUQRyLgvVhvfW/source.gif", "https://media3.giphy.com/media/26BRCc2VNkdZ5tjvG/source.gif", "https://media2.giphy.com/media/l0MYzSbsaFfUZ1DTa/source.gif", "https://format-magazine-production-res.cloudinary.com/image/upload/c_limit,w_540,h_540,f_gif,f_auto/jjjjjohn-skeleton-4", "https://i.gifer.com/WYLS.gif", "https://media0.giphy.com/media/3owyp71e0oJZg3RQsw/source.gif", "https://www.google.com/url?sa=i&rct=j&q=&esrc=s&source=images&cd=&cad=rja&uact=8&ved=2ahUKEwi3o7_j3vvkAhXYGrkGHSQ1AKwQjRx6BAgBEAQ&url=%2Furl%3Fsa%3Di%26rct%3Dj%26q%3D%26esrc%3Ds%26source%3Dimages%26cd%3D%26ved%3D%26url%3Dhttps%253A%252F%252Fgiphy.com%252Fstickers%252Fskeleton-cds-making-it-rain-l0MYPqg7VQWLK9yWQ%26psig%3DAOvVaw2xm1MH0j04_SUAtE8P-Zw6%26ust%3D1570042508187445&psig=AOvVaw2xm1MH0j04_SUAtE8P-Zw6&ust=1570042508187445", "https://upload-assets.vice.com/files/2016/08/02/1470174545JohnKarel7.gif", "https://i.pinimg.com/originals/09/3e/4b/093e4b66b0a3d888db0184f4dc119204.gif", "https://media2.giphy.com/media/xTiTnpT1zjQ8msSQbS/source.gif", "https://i.pinimg.com/originals/c6/db/a2/c6dba2e9a5b48db157c6a2fea4a8b692.gif", "https://media3.giphy.com/media/26gJAECj4uH3zpjAQ/200w.gif", "https://pa1.narvii.com/6991/847644c6226d4e577370be3d4ac6c09b7159ac53r1-540-540_hq.gif", "http://artfcity.com/wp-content/uploads/2016/09/tumblr_o3lj2ehrmY1qza1qzo1_500.gif", "https://i.gifer.com/PaE.gif", "https://media2.giphy.com/media/l0MYR7ATNClP1GjcI/source.gif", "https://i.pinimg.com/originals/cd/e4/e2/cde4e242d5c3ace213a72d33cea9b16e.gif", "https://media1.giphy.com/media/3o6ZtaV6slZPhE0ftu/source.gif", "https://format-magazine-production-res.cloudinary.com/image/upload/c_limit,w_540,h_540,f_gif,f_auto/jjjjjohn-skeleton-9", "https://66.media.tumblr.com/d30560fbc829bcb17b9fd92844088487/tumblr_naes2zz8im1qza1qzo1_500.gif", 
"https://pa1.narvii.com/6991/533e5f5d561d5c58aff06092fbf12e6fdfb52ecar1-540-540_hq.gif", "https://upload-assets.vice.com/files/2016/08/02/1470174546JohnKarel10.gif", "https://i.kym-cdn.com/photos/images/original/001/186/745/526.gif", "https://media3.giphy.com/media/3o6ZtmOg5coyOIc3OU/source.gif", "https://upload-assets.vice.com/files/2016/08/02/1470174545JohnKarel6.gif", "https://i.pinimg.com/originals/b6/85/99/b6859978fb0af8249b58a52f4755647b.gif", "https://i.kym-cdn.com/photos/images/original/001/178/761/062.gif", "https://format-magazine-production-res.cloudinary.com/image/upload/c_limit,w_540,h_540,f_gif,f_auto/jjjjjohn-skeleton-6", "https://upload-assets.vice.com/files/2016/08/02/1470174545JohnKarel4.gif", "https://78.media.tumblr.com/a7411f14760a4d7978e735d55ed438a6/tumblr_nvdixx3Jxs1qac28vo1_r2_500.gif", "https://cdn.shopify.com/s/files/1/2128/8929/files/skeleton_pizzabites.gif?v=1559674098", "https://upload-assets.vice.com/files/2016/08/02/1470174546JohnKarel8.gif", "https://pa1.narvii.com/6991/1a26e49708a6234f5e34b495b0744ea2564a0623r1-540-540_hq.gif", "https://szx3iab.files.wordpress.com/2016/11/tumblr_n995vvy5dv1qza1qzo1_500.gif?w=352", "https://upload-assets.vice.com/files/2016/08/02/1470174544JohnKarel5.gif", "https://i.pinimg.com/originals/e7/1a/fb/e71afbdcda22ae75f71ddd438074504e.gif", "https://i.pinimg.com/originals/32/80/6f/32806fc20098726a64c8ff3021f80845.gif", "https://66.media.tumblr.com/e1dc70d9af348d26a9e9bae24fea7def/tumblr_p62tyfOY9a1qza1qzo1_540.gifv", "https://szx3iab.files.wordpress.com/2016/11/tumblr_nbvddjrphx1qza1qzo1_r1_500.gif?w=352", "https://i.kym-cdn.com/photos/images/original/001/181/074/55e.gif", "https://steamuserimages-a.akamaihd.net/ugc/922557282351284530/29A2D872D9A199B15B1E5CE6BFD2C49B3CC3A96A/?imw=512&imh=512&ima=fit&impolicy=Letterbox&imcolor=%23000000&letterbox=true", "https://i.pinimg.com/originals/ea/28/e5/ea28e5e9c44c07fa5cee1011a80162cd.gif", 
"https://66.media.tumblr.com/6a49a3078ea1b35fc676812bd59c7bf8/tumblr_pfov6xxunm1qza1qzo1_540.gif", "https://steamuserimages-a.akamaihd.net/ugc/922557282351282579/CDA6809E7AE096DB1E099E3E478FCB7AB970B2EF/", "https://pa1.narvii.com/6992/c80cbaad5797bcbbeb497db6f97317614959575br1-500-500_hq.gif", "https://66.media.tumblr.com/6fcc55ccbccd8cdad80a4c80eca8298a/tumblr_p9ihvfJ6Bg1qza1qzo1_540.gif", "https://414foto.com/image/289002-full_halloween-magic-gif-by-jjjjjohn-find-share-on-giphy.gif", "https://pa1.narvii.com/6991/145a1099ffab05aeefc24b787f3eaa91d5c245c5r1-540-540_hq.gif", "https://aws1.discourse-cdn.com/woot/original/3X/b/c/bcc6725e388cb4f1ddf0c242ca1f4b50169b912c.gif", "https://img.buzzfeed.com/buzzfeed-static/static/2015-11/18/21/enhanced/webdr03/anigif_original-921-1447900549-1.gif", "https://66.media.tumblr.com/f034b9b7e673fc704a946f845c4775d4/tumblr_nvzf29ynN71qza1qzo1_500.gif", "https://i2.wp.com/www.doperadcool.com/wp-content/uploads/2019/08/jjjjjohn-skeleton-plants-perfect-score.gif?fit=500%2C500&ssl=1", "https://i.pinimg.com/originals/a9/86/cc/a986cc2005b7be0f650f2b92a12a787e.gif", "https://pa1.narvii.com/6991/720fe80da48d6e095924cead73f1ba4da2218789r1-540-540_hq.gif", "https://media0.giphy.com/media/3o7TKWGiPEqIhF0Yrm/source.gif", "https://media.giphy.com/media/y6Xvxvx5Q37LW/giphy.gif", "https://media1.giphy.com/media/3oriNWxJAEYUt59Ego/source.gif", "https://aws1.discourse-cdn.com/woot/original/3X/7/1/71e6d474061c0a43bb671c0e2289fddfb6f81c97.gif", "https://66.media.tumblr.com/62ab226c367aa62d7f13d042486ff083/tumblr_pk23ycWYPE1qza1qzo1_r1_540.gif", "https://media2.giphy.com/media/5t4gL5cVbiN0nSyKkd/source.gif", "https://szx3iab.files.wordpress.com/2016/11/tumblr_nljo30cjit1qza1qzo1_500.gif?w=352", "https://pa1.narvii.com/6991/1823151f01188539eee8c67dfb806241abea6532r1-540-540_hq.gif", "https://media2.giphy.com/media/lJMgI0zIW8Wz49Qc13/source.gif", "https://aws1.discourse-cdn.com/woot/original/3X/b/7/b7c97a5d9f42ddfc429220fcc63a478010bec6d2.gif", 
"https://img.buzzfeed.com/buzzfeed-static/static/2019-09/23/3/asset/4d8c340c3380/anigif_sub-buzz-6141-1569209960-1.gif?output-quality=auto&output-format=auto&downsize=360:*", "https://aws1.discourse-cdn.com/woot/original/3X/d/b/dbbc1a1f1cbe907160e603da114603116b315252.gif", "https://media3.giphy.com/media/l46CxfeUUs4NV2KJ2/source.gif", "https://i.pinimg.com/originals/e8/e6/c6/e8e6c608e0346ffdcbc20a2344be62bd.gif", "https://i.pinimg.com/originals/ec/ca/89/ecca896e384db32a5b975a7c79741fa3.gif"] await message.channel.send(random.choice(messages)) @command async def spookmeter(message): endswith = message.content.lower().endswith async def send(url): await message.channel.send(url) if endswith('not spooky') or endswith('1'): await send('https://i.imgur.com/OtHOWy4.gif') elif endswith('spoopy') or endswith('2'): await send('https://i.imgur.com/UvoCUa0.gif') elif endswith('p spoopy') or endswith('3'): await send('https://i.imgur.com/HmJXXfh.gif') elif endswith('spooky') or endswith('4'): await send('https://i.imgur.com/o1aLBqG.gif') elif endswith('2spooky') or endswith ('5'): await send('https://i.imgur.com/FToVdJR.gif') # }}} print("https://discordapp.com/oauth2/authorize?&client_id=" + auth['clientId'] + "&scope=bot&permissions=0") client.run(auth['token'])
130.549296
7,276
0.784659
import asyncio import discord import logging import json import random logger = logging.getLogger('spookbot') logger.setLevel(logging.INFO) handler = logging.FileHandler(filename='spookbot.log', mode='w', encoding='utf-8') handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')) logger.addHandler(handler) with open("auth.json") as auth: auth = json.load(auth) client = discord.Client() commands = {} def command(func): commands[func.__name__] = func return func @client.event async def on_message(message): for command in commands.keys(): if message.content.startswith('.' + command): logger.info('Command `%s` called from message: %s', command, message.content) await commands[command](message) @command async def doot(message): await message.channel.send(message.author.mention + ' doot doot') @command async def funnycatchphrase(message): messages = ["Doot", "AssFuck", "Shit Bruh"] await message.channel.send(random.choice(messages)) @command async def calciumfix(message): messages = ["https://mbtskoudsalg.com/images/transparent-stuff-vaporwave-3.gif", "https://media.giphy.com/media/3ohhwqrNt7rd9yuj7O/source.gif", "https://tenor.com/view/waiting-skeleton-gif-6159814", "https://tenor.com/view/waiting-gif-9030040", "https://tenor.com/view/iphone-skeleton-gif-5452826", "https://tenor.com/view/skeleton-waiting-eating-bored-playing-around-gif-14558363", "https://tenor.com/view/shaking-skeleton-skeletons-gif-4757109", "https://tenor.com/view/skeleton-waiting-keyboard-bored-life-gif-14558359", "https://tenor.com/view/skeleton-tea-gif-10625213", "https://tenor.com/view/skeleton-ruby-swipe-hearts-gif-10625183", "https://media3.giphy.com/media/26BRDDhIt8oiyEjS0/source.gif", "https://media3.giphy.com/media/3o7TKpmHsAZiTTekve/source.gif", "https://media0.giphy.com/media/3o7TKqNtiUdqSfB6EM/source.gif", "https://media0.giphy.com/media/l3fQ6Fh6Ze3rMXn4A/source.gif", "https://media2.giphy.com/media/3o7TKJNbIxU09eccuI/source.gif", 
"https://media0.giphy.com/media/26BRxmqeqsRPBBOpy/source.gif", "https://media1.giphy.com/media/l46CpUQRyLgvVhvfW/source.gif", "https://media3.giphy.com/media/26BRCc2VNkdZ5tjvG/source.gif", "https://media2.giphy.com/media/l0MYzSbsaFfUZ1DTa/source.gif", "https://format-magazine-production-res.cloudinary.com/image/upload/c_limit,w_540,h_540,f_gif,f_auto/jjjjjohn-skeleton-4", "https://i.gifer.com/WYLS.gif", "https://media0.giphy.com/media/3owyp71e0oJZg3RQsw/source.gif", "https://www.google.com/url?sa=i&rct=j&q=&esrc=s&source=images&cd=&cad=rja&uact=8&ved=2ahUKEwi3o7_j3vvkAhXYGrkGHSQ1AKwQjRx6BAgBEAQ&url=%2Furl%3Fsa%3Di%26rct%3Dj%26q%3D%26esrc%3Ds%26source%3Dimages%26cd%3D%26ved%3D%26url%3Dhttps%253A%252F%252Fgiphy.com%252Fstickers%252Fskeleton-cds-making-it-rain-l0MYPqg7VQWLK9yWQ%26psig%3DAOvVaw2xm1MH0j04_SUAtE8P-Zw6%26ust%3D1570042508187445&psig=AOvVaw2xm1MH0j04_SUAtE8P-Zw6&ust=1570042508187445", "https://upload-assets.vice.com/files/2016/08/02/1470174545JohnKarel7.gif", "https://i.pinimg.com/originals/09/3e/4b/093e4b66b0a3d888db0184f4dc119204.gif", "https://media2.giphy.com/media/xTiTnpT1zjQ8msSQbS/source.gif", "https://i.pinimg.com/originals/c6/db/a2/c6dba2e9a5b48db157c6a2fea4a8b692.gif", "https://media3.giphy.com/media/26gJAECj4uH3zpjAQ/200w.gif", "https://pa1.narvii.com/6991/847644c6226d4e577370be3d4ac6c09b7159ac53r1-540-540_hq.gif", "http://artfcity.com/wp-content/uploads/2016/09/tumblr_o3lj2ehrmY1qza1qzo1_500.gif", "https://i.gifer.com/PaE.gif", "https://media2.giphy.com/media/l0MYR7ATNClP1GjcI/source.gif", "https://i.pinimg.com/originals/cd/e4/e2/cde4e242d5c3ace213a72d33cea9b16e.gif", "https://media1.giphy.com/media/3o6ZtaV6slZPhE0ftu/source.gif", "https://format-magazine-production-res.cloudinary.com/image/upload/c_limit,w_540,h_540,f_gif,f_auto/jjjjjohn-skeleton-9", "https://66.media.tumblr.com/d30560fbc829bcb17b9fd92844088487/tumblr_naes2zz8im1qza1qzo1_500.gif", "https://pa1.narvii.com/6991/533e5f5d561d5c58aff06092fbf12e6fdfb52ecar1-540-540_hq.gif", 
"https://upload-assets.vice.com/files/2016/08/02/1470174546JohnKarel10.gif", "https://i.kym-cdn.com/photos/images/original/001/186/745/526.gif", "https://media3.giphy.com/media/3o6ZtmOg5coyOIc3OU/source.gif", "https://upload-assets.vice.com/files/2016/08/02/1470174545JohnKarel6.gif", "https://i.pinimg.com/originals/b6/85/99/b6859978fb0af8249b58a52f4755647b.gif", "https://i.kym-cdn.com/photos/images/original/001/178/761/062.gif", "https://format-magazine-production-res.cloudinary.com/image/upload/c_limit,w_540,h_540,f_gif,f_auto/jjjjjohn-skeleton-6", "https://upload-assets.vice.com/files/2016/08/02/1470174545JohnKarel4.gif", "https://78.media.tumblr.com/a7411f14760a4d7978e735d55ed438a6/tumblr_nvdixx3Jxs1qac28vo1_r2_500.gif", "https://cdn.shopify.com/s/files/1/2128/8929/files/skeleton_pizzabites.gif?v=1559674098", "https://upload-assets.vice.com/files/2016/08/02/1470174546JohnKarel8.gif", "https://pa1.narvii.com/6991/1a26e49708a6234f5e34b495b0744ea2564a0623r1-540-540_hq.gif", "https://szx3iab.files.wordpress.com/2016/11/tumblr_n995vvy5dv1qza1qzo1_500.gif?w=352", "https://upload-assets.vice.com/files/2016/08/02/1470174544JohnKarel5.gif", "https://i.pinimg.com/originals/e7/1a/fb/e71afbdcda22ae75f71ddd438074504e.gif", "https://i.pinimg.com/originals/32/80/6f/32806fc20098726a64c8ff3021f80845.gif", "https://66.media.tumblr.com/e1dc70d9af348d26a9e9bae24fea7def/tumblr_p62tyfOY9a1qza1qzo1_540.gifv", "https://szx3iab.files.wordpress.com/2016/11/tumblr_nbvddjrphx1qza1qzo1_r1_500.gif?w=352", "https://i.kym-cdn.com/photos/images/original/001/181/074/55e.gif", "https://steamuserimages-a.akamaihd.net/ugc/922557282351284530/29A2D872D9A199B15B1E5CE6BFD2C49B3CC3A96A/?imw=512&imh=512&ima=fit&impolicy=Letterbox&imcolor=%23000000&letterbox=true", "https://i.pinimg.com/originals/ea/28/e5/ea28e5e9c44c07fa5cee1011a80162cd.gif", "https://66.media.tumblr.com/6a49a3078ea1b35fc676812bd59c7bf8/tumblr_pfov6xxunm1qza1qzo1_540.gif", 
"https://steamuserimages-a.akamaihd.net/ugc/922557282351282579/CDA6809E7AE096DB1E099E3E478FCB7AB970B2EF/", "https://pa1.narvii.com/6992/c80cbaad5797bcbbeb497db6f97317614959575br1-500-500_hq.gif", "https://66.media.tumblr.com/6fcc55ccbccd8cdad80a4c80eca8298a/tumblr_p9ihvfJ6Bg1qza1qzo1_540.gif", "https://414foto.com/image/289002-full_halloween-magic-gif-by-jjjjjohn-find-share-on-giphy.gif", "https://pa1.narvii.com/6991/145a1099ffab05aeefc24b787f3eaa91d5c245c5r1-540-540_hq.gif", "https://aws1.discourse-cdn.com/woot/original/3X/b/c/bcc6725e388cb4f1ddf0c242ca1f4b50169b912c.gif", "https://img.buzzfeed.com/buzzfeed-static/static/2015-11/18/21/enhanced/webdr03/anigif_original-921-1447900549-1.gif", "https://66.media.tumblr.com/f034b9b7e673fc704a946f845c4775d4/tumblr_nvzf29ynN71qza1qzo1_500.gif", "https://i2.wp.com/www.doperadcool.com/wp-content/uploads/2019/08/jjjjjohn-skeleton-plants-perfect-score.gif?fit=500%2C500&ssl=1", "https://i.pinimg.com/originals/a9/86/cc/a986cc2005b7be0f650f2b92a12a787e.gif", "https://pa1.narvii.com/6991/720fe80da48d6e095924cead73f1ba4da2218789r1-540-540_hq.gif", "https://media0.giphy.com/media/3o7TKWGiPEqIhF0Yrm/source.gif", "https://media.giphy.com/media/y6Xvxvx5Q37LW/giphy.gif", "https://media1.giphy.com/media/3oriNWxJAEYUt59Ego/source.gif", "https://aws1.discourse-cdn.com/woot/original/3X/7/1/71e6d474061c0a43bb671c0e2289fddfb6f81c97.gif", "https://66.media.tumblr.com/62ab226c367aa62d7f13d042486ff083/tumblr_pk23ycWYPE1qza1qzo1_r1_540.gif", "https://media2.giphy.com/media/5t4gL5cVbiN0nSyKkd/source.gif", "https://szx3iab.files.wordpress.com/2016/11/tumblr_nljo30cjit1qza1qzo1_500.gif?w=352", "https://pa1.narvii.com/6991/1823151f01188539eee8c67dfb806241abea6532r1-540-540_hq.gif", "https://media2.giphy.com/media/lJMgI0zIW8Wz49Qc13/source.gif", "https://aws1.discourse-cdn.com/woot/original/3X/b/7/b7c97a5d9f42ddfc429220fcc63a478010bec6d2.gif", 
"https://img.buzzfeed.com/buzzfeed-static/static/2019-09/23/3/asset/4d8c340c3380/anigif_sub-buzz-6141-1569209960-1.gif?output-quality=auto&output-format=auto&downsize=360:*", "https://aws1.discourse-cdn.com/woot/original/3X/d/b/dbbc1a1f1cbe907160e603da114603116b315252.gif", "https://media3.giphy.com/media/l46CxfeUUs4NV2KJ2/source.gif", "https://i.pinimg.com/originals/e8/e6/c6/e8e6c608e0346ffdcbc20a2344be62bd.gif", "https://i.pinimg.com/originals/ec/ca/89/ecca896e384db32a5b975a7c79741fa3.gif"] await message.channel.send(random.choice(messages)) @command async def spookmeter(message): endswith = message.content.lower().endswith async def send(url): await message.channel.send(url) if endswith('not spooky') or endswith('1'): await send('https://i.imgur.com/OtHOWy4.gif') elif endswith('spoopy') or endswith('2'): await send('https://i.imgur.com/UvoCUa0.gif') elif endswith('p spoopy') or endswith('3'): await send('https://i.imgur.com/HmJXXfh.gif') elif endswith('spooky') or endswith('4'): await send('https://i.imgur.com/o1aLBqG.gif') elif endswith('2spooky') or endswith ('5'): await send('https://i.imgur.com/FToVdJR.gif') print("https://discordapp.com/oauth2/authorize?&client_id=" + auth['clientId'] + "&scope=bot&permissions=0") client.run(auth['token'])
true
true
1c45c59d5474af1e72f1993635d141aeccc75b6e
416
py
Python
products/migrations/0002_auto_20210110_1353.py
ashishkr619/dukaan_main
b236b498b95f62160959b5e84bb642a0be6063b0
[ "MIT" ]
null
null
null
products/migrations/0002_auto_20210110_1353.py
ashishkr619/dukaan_main
b236b498b95f62160959b5e84bb642a0be6063b0
[ "MIT" ]
null
null
null
products/migrations/0002_auto_20210110_1353.py
ashishkr619/dukaan_main
b236b498b95f62160959b5e84bb642a0be6063b0
[ "MIT" ]
null
null
null
# Generated by Django 2.2.17 on 2021-01-10 13:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0001_initial'), ] operations = [ migrations.AlterField( model_name='product', name='category', field=models.CharField(max_length=120, verbose_name='Product Categories'), ), ]
21.894737
86
0.612981
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0001_initial'), ] operations = [ migrations.AlterField( model_name='product', name='category', field=models.CharField(max_length=120, verbose_name='Product Categories'), ), ]
true
true
1c45c5a180008bb7c796a6f16b8559ac7395f0c5
1,094
py
Python
test/fx2trt/converters/acc_op/test_relu.py
steffenerickson/pytorch
0b656c4c69ce77ecd9aace486e471917e4660746
[ "Intel" ]
1
2022-01-31T14:15:35.000Z
2022-01-31T14:15:35.000Z
test/fx2trt/converters/acc_op/test_relu.py
steffenerickson/pytorch
0b656c4c69ce77ecd9aace486e471917e4660746
[ "Intel" ]
1
2022-02-03T12:43:23.000Z
2022-02-03T12:47:53.000Z
test/fx2trt/converters/acc_op/test_relu.py
steffenerickson/pytorch
0b656c4c69ce77ecd9aace486e471917e4660746
[ "Intel" ]
null
null
null
# Owner(s): ["oncall: aiacc"] import torch import torch.fx.experimental.fx_acc.acc_ops as acc_ops import torch.nn as nn from torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec from torch.testing._internal.common_utils import run_tests class TestReLUConverter(AccTestCase): def test_relu(self): class TestModule(nn.Module): def forward(self, x): return nn.functional.relu(x) inputs = [torch.randn(1, 10)] self.run_test(TestModule(), inputs, expected_ops={acc_ops.relu}) def test_relu_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): return nn.functional.relu(x) input_specs = [ InputTensorSpec( shape=(-1, -1, -1), dtype=torch.float32, shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] self.run_test_with_dynamic_shape( TestModule(), input_specs, expected_ops={acc_ops.relu} ) if __name__ == '__main__': run_tests()
29.567568
78
0.606947
import torch import torch.fx.experimental.fx_acc.acc_ops as acc_ops import torch.nn as nn from torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec from torch.testing._internal.common_utils import run_tests class TestReLUConverter(AccTestCase): def test_relu(self): class TestModule(nn.Module): def forward(self, x): return nn.functional.relu(x) inputs = [torch.randn(1, 10)] self.run_test(TestModule(), inputs, expected_ops={acc_ops.relu}) def test_relu_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): return nn.functional.relu(x) input_specs = [ InputTensorSpec( shape=(-1, -1, -1), dtype=torch.float32, shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] self.run_test_with_dynamic_shape( TestModule(), input_specs, expected_ops={acc_ops.relu} ) if __name__ == '__main__': run_tests()
true
true
1c45c79ba7fba114d9c50c58c0dee7cf69a990c6
36,332
py
Python
scripts/validate_docstrings.py
kpflugshaupt/pandas
c9e3883c630c48b17218e6bcc5593720c1402bf1
[ "BSD-3-Clause" ]
80
2015-01-01T17:32:11.000Z
2022-01-24T07:17:47.000Z
scripts/validate_docstrings.py
sanjusci/pandas
a1fee9199eba7ebf423880243936b9f1501d3d3a
[ "BSD-3-Clause" ]
null
null
null
scripts/validate_docstrings.py
sanjusci/pandas
a1fee9199eba7ebf423880243936b9f1501d3d3a
[ "BSD-3-Clause" ]
28
2015-01-30T16:07:48.000Z
2022-02-11T18:41:13.000Z
#!/usr/bin/env python """ Analyze docstrings to detect errors. If no argument is provided, it does a quick check of docstrings and returns a csv with all API functions and results of basic checks. If a function or method is provided in the form "pandas.function", "pandas.module.class.method", etc. a list of all errors in the docstring for the specified function or method. Usage:: $ ./validate_docstrings.py $ ./validate_docstrings.py pandas.DataFrame.head """ import os import sys import json import re import glob import functools import collections import argparse import pydoc import inspect import importlib import doctest import tempfile import ast import textwrap import flake8.main.application try: from io import StringIO except ImportError: from cStringIO import StringIO # Template backend makes matplotlib to not plot anything. This is useful # to avoid that plot windows are open from the doctests while running the # script. Setting here before matplotlib is loaded. # We don't warn for the number of open plots, as none is actually being opened os.environ['MPLBACKEND'] = 'Template' import matplotlib matplotlib.rc('figure', max_open_warning=10000) import numpy BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.join(BASE_PATH)) import pandas from pandas.compat import signature sys.path.insert(1, os.path.join(BASE_PATH, 'doc', 'sphinxext')) from numpydoc.docscrape import NumpyDocString from pandas.io.formats.printing import pprint_thing PRIVATE_CLASSES = ['NDFrame', 'IndexOpsMixin'] DIRECTIVES = ['versionadded', 'versionchanged', 'deprecated'] ALLOWED_SECTIONS = ['Parameters', 'Attributes', 'Methods', 'Returns', 'Yields', 'Other Parameters', 'Raises', 'Warns', 'See Also', 'Notes', 'References', 'Examples'] ERROR_MSGS = { 'GL01': 'Docstring text (summary) should start in the line immediately ' 'after the opening quotes (not in the same line, or leaving a ' 'blank line in between)', 'GL02': 'Closing quotes should be 
placed in the line after the last text ' 'in the docstring (do not close the quotes in the same line as ' 'the text, or leave a blank line between the last text and the ' 'quotes)', 'GL03': 'Double line break found; please use only one blank line to ' 'separate sections or paragraphs, and do not leave blank lines ' 'at the end of docstrings', 'GL04': 'Private classes ({mentioned_private_classes}) should not be ' 'mentioned in public docstrings', 'GL05': 'Tabs found at the start of line "{line_with_tabs}", please use ' 'whitespace only', 'GL06': 'Found unknown section "{section}". Allowed sections are: ' '{allowed_sections}', 'GL07': 'Sections are in the wrong order. Correct order is: ' '{correct_sections}', 'GL08': 'The object does not have a docstring', 'GL09': 'Deprecation warning should precede extended summary', 'SS01': 'No summary found (a short summary in a single line should be ' 'present at the beginning of the docstring)', 'SS02': 'Summary does not start with a capital letter', 'SS03': 'Summary does not end with a period', 'SS04': 'Summary contains heading whitespaces', 'SS05': 'Summary must start with infinitive verb, not third person ' '(e.g. use "Generate" instead of "Generates")', 'SS06': 'Summary should fit in a single line', 'ES01': 'No extended summary found', 'PR01': 'Parameters {missing_params} not documented', 'PR02': 'Unknown parameters {unknown_params}', 'PR03': 'Wrong parameters order. Actual: {actual_params}. 
' 'Documented: {documented_params}', 'PR04': 'Parameter "{param_name}" has no type', 'PR05': 'Parameter "{param_name}" type should not finish with "."', 'PR06': 'Parameter "{param_name}" type should use "{right_type}" instead ' 'of "{wrong_type}"', 'PR07': 'Parameter "{param_name}" has no description', 'PR08': 'Parameter "{param_name}" description should start with a ' 'capital letter', 'PR09': 'Parameter "{param_name}" description should finish with "."', 'PR10': 'Parameter "{param_name}" requires a space before the colon ' 'separating the parameter name and type', 'RT01': 'No Returns section found', 'RT02': 'The first line of the Returns section should contain only the ' 'type, unless multiple values are being returned', 'RT03': 'Return value has no description', 'RT04': 'Return value description should start with a capital letter', 'RT05': 'Return value description should finish with "."', 'YD01': 'No Yields section found', 'SA01': 'See Also section not found', 'SA02': 'Missing period at end of description for See Also ' '"{reference_name}" reference', 'SA03': 'Description should be capitalized for See Also ' '"{reference_name}" reference', 'SA04': 'Missing description for See Also "{reference_name}" reference', 'SA05': '{reference_name} in `See Also` section does not need `pandas` ' 'prefix, use {right_reference} instead.', 'EX01': 'No examples section found', 'EX02': 'Examples do not pass tests:\n{doctest_log}', 'EX03': 'flake8 error: {error_code} {error_message}{times_happening}', 'EX04': 'Do not import {imported_library}, as it is imported ' 'automatically for the examples (numpy as np, pandas as pd)', } def error(code, **kwargs): """ Return a tuple with the error code and the message with variables replaced. This is syntactic sugar so instead of: - `('EX02', ERROR_MSGS['EX02'].format(doctest_log=log))` We can simply use: - `error('EX02', doctest_log=log)` Parameters ---------- code : str Error code. 
**kwargs Values for the variables in the error messages Returns ------- code : str Error code. message : str Error message with varaibles replaced. """ return (code, ERROR_MSGS[code].format(**kwargs)) def get_api_items(api_doc_fd): """ Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """ current_module = 'pandas' previous_line = current_section = current_subsection = '' position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set('-'): current_section = previous_line continue if set(line) == set('~'): current_subsection = previous_line continue if line.startswith('.. currentmodule::'): current_module = line.replace('.. currentmodule::', '').strip() continue if line == '.. 
autosummary::': position = 'autosummary' continue if position == 'autosummary': if line == '': position = 'items' continue if position == 'items': if line == '': position = None continue item = line.strip() func = importlib.import_module(current_module) for part in item.split('.'): func = getattr(func, part) yield ('.'.join([current_module, item]), func, current_section, current_subsection) previous_line = line class Docstring(object): def __init__(self, name): self.name = name obj = self._load_obj(name) self.obj = obj self.code_obj = self._to_original_callable(obj) self.raw_doc = obj.__doc__ or '' self.clean_doc = pydoc.getdoc(obj) self.doc = NumpyDocString(self.clean_doc) def __len__(self): return len(self.raw_doc) @staticmethod def _load_obj(name): """ Import Python object from its name as string. Parameters ---------- name : str Object name to import (e.g. pandas.Series.str.upper) Returns ------- object Python object that can be a class, method, function... Examples -------- >>> Docstring._load_obj('pandas.Series') <class 'pandas.core.series.Series'> """ for maxsplit in range(1, name.count('.') + 1): # TODO when py3 only replace by: module, *func_parts = ... func_name_split = name.rsplit('.', maxsplit) module = func_name_split[0] func_parts = func_name_split[1:] try: obj = importlib.import_module(module) except ImportError: pass else: continue if 'obj' not in locals(): raise ImportError('No module can be imported ' 'from "{}"'.format(name)) for part in func_parts: obj = getattr(obj, part) return obj @staticmethod def _to_original_callable(obj): """ Find the Python object that contains the source code of the object. This is useful to find the place in the source code (file and line number) where a docstring is defined. It does not currently work for all cases, but it should help find some (properties...). 
""" while True: if inspect.isfunction(obj) or inspect.isclass(obj): f = inspect.getfile(obj) if f.startswith('<') and f.endswith('>'): return None return obj if inspect.ismethod(obj): obj = obj.__func__ elif isinstance(obj, functools.partial): obj = obj.func elif isinstance(obj, property): obj = obj.fget else: return None @property def type(self): return type(self.obj).__name__ @property def is_function_or_method(self): # TODO(py27): remove ismethod return (inspect.isfunction(self.obj) or inspect.ismethod(self.obj)) @property def source_file_name(self): """ File name where the object is implemented (e.g. pandas/core/frame.py). """ try: fname = inspect.getsourcefile(self.code_obj) except TypeError: # In some cases the object is something complex like a cython # object that can't be easily introspected. An it's better to # return the source code file of the object as None, than crash pass else: if fname: fname = os.path.relpath(fname, BASE_PATH) return fname @property def source_file_def_line(self): """ Number of line where the object is defined in its file. """ try: return inspect.getsourcelines(self.code_obj)[-1] except (OSError, TypeError): # In some cases the object is something complex like a cython # object that can't be easily introspected. 
An it's better to # return the line number as None, than crash pass @property def github_url(self): url = 'https://github.com/pandas-dev/pandas/blob/master/' url += '{}#L{}'.format(self.source_file_name, self.source_file_def_line) return url @property def start_blank_lines(self): i = None if self.raw_doc: for i, row in enumerate(self.raw_doc.split('\n')): if row.strip(): break return i @property def end_blank_lines(self): i = None if self.raw_doc: for i, row in enumerate(reversed(self.raw_doc.split('\n'))): if row.strip(): break return i @property def double_blank_lines(self): prev = True for row in self.raw_doc.split('\n'): if not prev and not row.strip(): return True prev = row.strip() return False @property def section_titles(self): sections = [] self.doc._doc.reset() while not self.doc._doc.eof(): content = self.doc._read_to_next_section() if (len(content) > 1 and len(content[0]) == len(content[1]) and set(content[1]) == {'-'}): sections.append(content[0]) return sections @property def summary(self): return ' '.join(self.doc['Summary']) @property def num_summary_lines(self): return len(self.doc['Summary']) @property def extended_summary(self): if not self.doc['Extended Summary'] and len(self.doc['Summary']) > 1: return ' '.join(self.doc['Summary']) return ' '.join(self.doc['Extended Summary']) @property def needs_summary(self): return not (bool(self.summary) and bool(self.extended_summary)) @property def doc_parameters(self): return collections.OrderedDict((name, (type_, ''.join(desc))) for name, type_, desc in self.doc['Parameters']) @property def signature_parameters(self): if inspect.isclass(self.obj): if hasattr(self.obj, '_accessors') and ( self.name.split('.')[-1] in self.obj._accessors): # accessor classes have a signature but don't want to show this return tuple() try: sig = signature(self.obj) except (TypeError, ValueError): # Some objects, mainly in C extensions do not support introspection # of the signature return tuple() params = sig.args if 
sig.varargs: params.append("*" + sig.varargs) if sig.keywords: params.append("**" + sig.keywords) params = tuple(params) if params and params[0] in ('self', 'cls'): return params[1:] return params @property def parameter_mismatches(self): errs = [] signature_params = self.signature_parameters doc_params = tuple(self.doc_parameters) missing = set(signature_params) - set(doc_params) if missing: errs.append(error('PR01', missing_params=pprint_thing(missing))) extra = set(doc_params) - set(signature_params) if extra: errs.append(error('PR02', unknown_params=pprint_thing(extra))) if (not missing and not extra and signature_params != doc_params and not (not signature_params and not doc_params)): errs.append(error('PR03', actual_params=signature_params, documented_params=doc_params)) return errs @property def correct_parameters(self): return not bool(self.parameter_mismatches) def parameter_type(self, param): return self.doc_parameters[param][0] def parameter_desc(self, param): desc = self.doc_parameters[param][1] # Find and strip out any sphinx directives for directive in DIRECTIVES: full_directive = '.. {}'.format(directive) if full_directive in desc: # Only retain any description before the directive desc = desc[:desc.index(full_directive)] return desc @property def see_also(self): return collections.OrderedDict((name, ''.join(desc)) for name, desc, _ in self.doc['See Also']) @property def examples(self): return self.doc['Examples'] @property def returns(self): return self.doc['Returns'] @property def yields(self): return self.doc['Yields'] @property def method_source(self): try: source = inspect.getsource(self.obj) except TypeError: return '' return textwrap.dedent(source) @property def method_returns_something(self): ''' Check if the docstrings method can return something. Bare returns, returns valued None and returns from nested functions are disconsidered. Returns ------- bool Whether the docstrings method can return something. 
''' def get_returns_not_on_nested_functions(node): returns = [node] if isinstance(node, ast.Return) else [] for child in ast.iter_child_nodes(node): # Ignore nested functions and its subtrees. if not isinstance(child, ast.FunctionDef): child_returns = get_returns_not_on_nested_functions(child) returns.extend(child_returns) return returns tree = ast.parse(self.method_source).body if tree: returns = get_returns_not_on_nested_functions(tree[0]) return_values = [r.value for r in returns] # Replace NameConstant nodes valued None for None. for i, v in enumerate(return_values): if isinstance(v, ast.NameConstant) and v.value is None: return_values[i] = None return any(return_values) else: return False @property def first_line_ends_in_dot(self): if self.doc: return self.doc.split('\n')[0][-1] == '.' @property def deprecated_with_directive(self): return '.. deprecated:: ' in (self.summary + self.extended_summary) @property def deprecated(self): return (self.name.startswith('pandas.Panel') or self.deprecated_with_directive) @property def mentioned_private_classes(self): return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc] @property def examples_errors(self): flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL finder = doctest.DocTestFinder() runner = doctest.DocTestRunner(optionflags=flags) context = {'np': numpy, 'pd': pandas} error_msgs = '' for test in finder.find(self.raw_doc, self.name, globs=context): f = StringIO() runner.run(test, out=f.write) error_msgs += f.getvalue() return error_msgs @property def examples_source_code(self): lines = doctest.DocTestParser().get_examples(self.raw_doc) return [line.source for line in lines] def validate_pep8(self): if not self.examples: return # F401 is needed to not generate flake8 errors in examples # that do not user numpy or pandas content = ''.join(('import numpy as np # noqa: F401\n', 'import pandas as pd # noqa: F401\n', *self.examples_source_code)) application = 
flake8.main.application.Application() application.initialize(["--quiet"]) with tempfile.NamedTemporaryFile(mode='w') as file: file.write(content) file.flush() application.run_checks([file.name]) # We need this to avoid flake8 printing the names of the files to # the standard output application.formatter.write = lambda line, source: None application.report() yield from application.guide.stats.statistics_for('') def get_validation_data(doc): """ Validate the docstring. Parameters ---------- doc : Docstring A Docstring object with the given function name. Returns ------- tuple errors : list of tuple Errors occurred during validation. warnings : list of tuple Warnings occurred during validation. examples_errs : str Examples usage displayed along the error, otherwise empty string. Notes ----- The errors codes are defined as: - First two characters: Section where the error happens: * GL: Global (no section, like section ordering errors) * SS: Short summary * ES: Extended summary * PR: Parameters * RT: Returns * YD: Yields * RS: Raises * WN: Warns * SA: See Also * NT: Notes * RF: References * EX: Examples - Last two characters: Numeric error code inside the section For example, EX02 is the second codified error in the Examples section (which in this case is assigned to examples that do not pass the tests). The error codes, their corresponding error messages, and the details on how they are validated, are not documented more than in the source code of this function. 
""" errs = [] wrns = [] if not doc.raw_doc: errs.append(error('GL08')) return errs, wrns, '' if doc.start_blank_lines != 1: errs.append(error('GL01')) if doc.end_blank_lines != 1: errs.append(error('GL02')) if doc.double_blank_lines: errs.append(error('GL03')) mentioned_errs = doc.mentioned_private_classes if mentioned_errs: errs.append(error('GL04', mentioned_private_classes=', '.join(mentioned_errs))) for line in doc.raw_doc.splitlines(): if re.match("^ *\t", line): errs.append(error('GL05', line_with_tabs=line.lstrip())) unexpected_sections = [section for section in doc.section_titles if section not in ALLOWED_SECTIONS] for section in unexpected_sections: errs.append(error('GL06', section=section, allowed_sections=', '.join(ALLOWED_SECTIONS))) correct_order = [section for section in ALLOWED_SECTIONS if section in doc.section_titles] if correct_order != doc.section_titles: errs.append(error('GL07', correct_sections=', '.join(correct_order))) if (doc.deprecated_with_directive and not doc.extended_summary.startswith('.. 
deprecated:: ')): errs.append(error('GL09')) if not doc.summary: errs.append(error('SS01')) else: if not doc.summary[0].isupper(): errs.append(error('SS02')) if doc.summary[-1] != '.': errs.append(error('SS03')) if doc.summary != doc.summary.lstrip(): errs.append(error('SS04')) elif (doc.is_function_or_method and doc.summary.split(' ')[0][-1] == 's'): errs.append(error('SS05')) if doc.num_summary_lines > 1: errs.append(error('SS06')) if not doc.extended_summary: wrns.append(('ES01', 'No extended summary found')) # PR01: Parameters not documented # PR02: Unknown parameters # PR03: Wrong parameters order errs += doc.parameter_mismatches for param in doc.doc_parameters: if not param.startswith("*"): # Check can ignore var / kwargs if not doc.parameter_type(param): if ':' in param: errs.append(error('PR10', param_name=param.split(':')[0])) else: errs.append(error('PR04', param_name=param)) else: if doc.parameter_type(param)[-1] == '.': errs.append(error('PR05', param_name=param)) common_type_errors = [('integer', 'int'), ('boolean', 'bool'), ('string', 'str')] for wrong_type, right_type in common_type_errors: if wrong_type in doc.parameter_type(param): errs.append(error('PR06', param_name=param, right_type=right_type, wrong_type=wrong_type)) if not doc.parameter_desc(param): errs.append(error('PR07', param_name=param)) else: if not doc.parameter_desc(param)[0].isupper(): errs.append(error('PR08', param_name=param)) if doc.parameter_desc(param)[-1] != '.': errs.append(error('PR09', param_name=param)) if doc.is_function_or_method: if not doc.returns: if doc.method_returns_something: errs.append(error('RT01')) else: if len(doc.returns) == 1 and doc.returns[0][1]: errs.append(error('RT02')) for name_or_type, type_, desc in doc.returns: if not desc: errs.append(error('RT03')) else: desc = ' '.join(desc) if not desc[0].isupper(): errs.append(error('RT04')) if not desc.endswith('.'): errs.append(error('RT05')) if not doc.yields and 'yield' in doc.method_source: 
errs.append(error('YD01')) if not doc.see_also: wrns.append(error('SA01')) else: for rel_name, rel_desc in doc.see_also.items(): if rel_desc: if not rel_desc.endswith('.'): errs.append(error('SA02', reference_name=rel_name)) if not rel_desc[0].isupper(): errs.append(error('SA03', reference_name=rel_name)) else: errs.append(error('SA04', reference_name=rel_name)) if rel_name.startswith('pandas.'): errs.append(error('SA05', reference_name=rel_name, right_reference=rel_name[len('pandas.'):])) examples_errs = '' if not doc.examples: wrns.append(error('EX01')) else: examples_errs = doc.examples_errors if examples_errs: errs.append(error('EX02', doctest_log=examples_errs)) for err in doc.validate_pep8(): errs.append(error('EX03', error_code=err.error_code, error_message=err.message, times_happening=' ({} times)'.format(err.count) if err.count > 1 else '')) examples_source_code = ''.join(doc.examples_source_code) for wrong_import in ('numpy', 'pandas'): if 'import {}'.format(wrong_import) in examples_source_code: errs.append(error('EX04', imported_library=wrong_import)) return errs, wrns, examples_errs def validate_one(func_name): """ Validate the docstring for the given func_name Parameters ---------- func_name : function Function whose docstring will be evaluated (e.g. pandas.read_csv). Returns ------- dict A dictionary containing all the information obtained from validating the docstring. """ doc = Docstring(func_name) errs, wrns, examples_errs = get_validation_data(doc) return {'type': doc.type, 'docstring': doc.clean_doc, 'deprecated': doc.deprecated, 'file': doc.source_file_name, 'file_line': doc.source_file_def_line, 'github_link': doc.github_url, 'errors': errs, 'warnings': wrns, 'examples_errors': examples_errs} def validate_all(prefix, ignore_deprecated=False): """ Execute the validation of all docstrings, and return a dict with the results. 
Parameters ---------- prefix : str or None If provided, only the docstrings that start with this pattern will be validated. If None, all docstrings will be validated. ignore_deprecated: bool, default False If True, deprecated objects are ignored when validating docstrings. Returns ------- dict A dictionary with an item for every function/method... containing all the validation information. """ result = {} seen = {} # functions from the API docs api_doc_fnames = os.path.join( BASE_PATH, 'doc', 'source', 'reference', '*.rst') api_items = [] for api_doc_fname in glob.glob(api_doc_fnames): with open(api_doc_fname) as f: api_items += list(get_api_items(f)) for func_name, func_obj, section, subsection in api_items: if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info shared_code_key = doc_info['file'], doc_info['file_line'] shared_code = seen.get(shared_code_key, '') result[func_name].update({'in_api': True, 'section': section, 'subsection': subsection, 'shared_code_with': shared_code}) seen[shared_code_key] = func_name # functions from introspecting Series, DataFrame and Panel api_item_names = set(list(zip(*api_items))[0]) for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel): for member in inspect.getmembers(class_): func_name = 'pandas.{}.{}'.format(class_.__name__, member[0]) if (not member[0].startswith('_') and func_name not in api_item_names): if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info result[func_name]['in_api'] = False return result def main(func_name, prefix, errors, output_format, ignore_deprecated): def header(title, width=80, char='#'): full_line = char * width side_len = (width - len(title) - 2) // 2 adj = '' if len(title) % 2 == 0 else ' ' title_line = '{side} {title}{adj} 
{side}'.format(side=char * side_len, title=title, adj=adj) return '\n{full_line}\n{title_line}\n{full_line}\n\n'.format( full_line=full_line, title_line=title_line) exit_status = 0 if func_name is None: result = validate_all(prefix, ignore_deprecated) if output_format == 'json': output = json.dumps(result) else: if output_format == 'default': output_format = '{text}\n' elif output_format == 'azure': output_format = ('##vso[task.logissue type=error;' 'sourcepath={path};' 'linenumber={row};' 'code={code};' ']{text}\n') else: raise ValueError('Unknown output_format "{}"'.format( output_format)) output = '' for name, res in result.items(): for err_code, err_desc in res['errors']: # The script would be faster if instead of filtering the # errors after validating them, it didn't validate them # initially. But that would complicate the code too much if errors and err_code not in errors: continue exit_status += 1 output += output_format.format( name=name, path=res['file'], row=res['file_line'], code=err_code, text='{}: {}'.format(name, err_desc)) sys.stdout.write(output) else: result = validate_one(func_name) sys.stderr.write(header('Docstring ({})'.format(func_name))) sys.stderr.write('{}\n'.format(result['docstring'])) sys.stderr.write(header('Validation')) if result['errors']: sys.stderr.write('{} Errors found:\n'.format( len(result['errors']))) for err_code, err_desc in result['errors']: # Failing examples are printed at the end if err_code == 'EX02': sys.stderr.write('\tExamples do not pass tests\n') continue sys.stderr.write('\t{}\n'.format(err_desc)) if result['warnings']: sys.stderr.write('{} Warnings found:\n'.format( len(result['warnings']))) for wrn_code, wrn_desc in result['warnings']: sys.stderr.write('\t{}\n'.format(wrn_desc)) if not result['errors']: sys.stderr.write('Docstring for "{}" correct. 
:)\n'.format( func_name)) if result['examples_errors']: sys.stderr.write(header('Doctests')) sys.stderr.write(result['examples_errors']) return exit_status if __name__ == '__main__': format_opts = 'default', 'json', 'azure' func_help = ('function or method to validate (e.g. pandas.DataFrame.head) ' 'if not provided, all docstrings are validated and returned ' 'as JSON') argparser = argparse.ArgumentParser( description='validate pandas docstrings') argparser.add_argument('function', nargs='?', default=None, help=func_help) argparser.add_argument('--format', default='default', choices=format_opts, help='format of the output when validating ' 'multiple docstrings (ignored when validating one).' 'It can be {}'.format(str(format_opts)[1:-1])) argparser.add_argument('--prefix', default=None, help='pattern for the ' 'docstring names, in order to decide which ones ' 'will be validated. A prefix "pandas.Series.str.' 'will make the script validate all the docstrings' 'of methods starting by this pattern. It is ' 'ignored if parameter function is provided') argparser.add_argument('--errors', default=None, help='comma separated ' 'list of error codes to validate. By default it ' 'validates all errors (ignored when validating ' 'a single docstring)') argparser.add_argument('--ignore_deprecated', default=False, action='store_true', help='if this flag is set, ' 'deprecated objects are ignored when validating ' 'all docstrings') args = argparser.parse_args() sys.exit(main(args.function, args.prefix, args.errors.split(',') if args.errors else None, args.format, args.ignore_deprecated))
36.588117
79
0.572168
import os import sys import json import re import glob import functools import collections import argparse import pydoc import inspect import importlib import doctest import tempfile import ast import textwrap import flake8.main.application try: from io import StringIO except ImportError: from cStringIO import StringIO os.environ['MPLBACKEND'] = 'Template' import matplotlib matplotlib.rc('figure', max_open_warning=10000) import numpy BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.join(BASE_PATH)) import pandas from pandas.compat import signature sys.path.insert(1, os.path.join(BASE_PATH, 'doc', 'sphinxext')) from numpydoc.docscrape import NumpyDocString from pandas.io.formats.printing import pprint_thing PRIVATE_CLASSES = ['NDFrame', 'IndexOpsMixin'] DIRECTIVES = ['versionadded', 'versionchanged', 'deprecated'] ALLOWED_SECTIONS = ['Parameters', 'Attributes', 'Methods', 'Returns', 'Yields', 'Other Parameters', 'Raises', 'Warns', 'See Also', 'Notes', 'References', 'Examples'] ERROR_MSGS = { 'GL01': 'Docstring text (summary) should start in the line immediately ' 'after the opening quotes (not in the same line, or leaving a ' 'blank line in between)', 'GL02': 'Closing quotes should be placed in the line after the last text ' 'in the docstring (do not close the quotes in the same line as ' 'the text, or leave a blank line between the last text and the ' 'quotes)', 'GL03': 'Double line break found; please use only one blank line to ' 'separate sections or paragraphs, and do not leave blank lines ' 'at the end of docstrings', 'GL04': 'Private classes ({mentioned_private_classes}) should not be ' 'mentioned in public docstrings', 'GL05': 'Tabs found at the start of line "{line_with_tabs}", please use ' 'whitespace only', 'GL06': 'Found unknown section "{section}". Allowed sections are: ' '{allowed_sections}', 'GL07': 'Sections are in the wrong order. 
Correct order is: ' '{correct_sections}', 'GL08': 'The object does not have a docstring', 'GL09': 'Deprecation warning should precede extended summary', 'SS01': 'No summary found (a short summary in a single line should be ' 'present at the beginning of the docstring)', 'SS02': 'Summary does not start with a capital letter', 'SS03': 'Summary does not end with a period', 'SS04': 'Summary contains heading whitespaces', 'SS05': 'Summary must start with infinitive verb, not third person ' '(e.g. use "Generate" instead of "Generates")', 'SS06': 'Summary should fit in a single line', 'ES01': 'No extended summary found', 'PR01': 'Parameters {missing_params} not documented', 'PR02': 'Unknown parameters {unknown_params}', 'PR03': 'Wrong parameters order. Actual: {actual_params}. ' 'Documented: {documented_params}', 'PR04': 'Parameter "{param_name}" has no type', 'PR05': 'Parameter "{param_name}" type should not finish with "."', 'PR06': 'Parameter "{param_name}" type should use "{right_type}" instead ' 'of "{wrong_type}"', 'PR07': 'Parameter "{param_name}" has no description', 'PR08': 'Parameter "{param_name}" description should start with a ' 'capital letter', 'PR09': 'Parameter "{param_name}" description should finish with "."', 'PR10': 'Parameter "{param_name}" requires a space before the colon ' 'separating the parameter name and type', 'RT01': 'No Returns section found', 'RT02': 'The first line of the Returns section should contain only the ' 'type, unless multiple values are being returned', 'RT03': 'Return value has no description', 'RT04': 'Return value description should start with a capital letter', 'RT05': 'Return value description should finish with "."', 'YD01': 'No Yields section found', 'SA01': 'See Also section not found', 'SA02': 'Missing period at end of description for See Also ' '"{reference_name}" reference', 'SA03': 'Description should be capitalized for See Also ' '"{reference_name}" reference', 'SA04': 'Missing description for See Also 
"{reference_name}" reference', 'SA05': '{reference_name} in `See Also` section does not need `pandas` ' 'prefix, use {right_reference} instead.', 'EX01': 'No examples section found', 'EX02': 'Examples do not pass tests:\n{doctest_log}', 'EX03': 'flake8 error: {error_code} {error_message}{times_happening}', 'EX04': 'Do not import {imported_library}, as it is imported ' 'automatically for the examples (numpy as np, pandas as pd)', } def error(code, **kwargs): return (code, ERROR_MSGS[code].format(**kwargs)) def get_api_items(api_doc_fd): current_module = 'pandas' previous_line = current_section = current_subsection = '' position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set('-'): current_section = previous_line continue if set(line) == set('~'): current_subsection = previous_line continue if line.startswith('.. currentmodule::'): current_module = line.replace('.. currentmodule::', '').strip() continue if line == '.. autosummary::': position = 'autosummary' continue if position == 'autosummary': if line == '': position = 'items' continue if position == 'items': if line == '': position = None continue item = line.strip() func = importlib.import_module(current_module) for part in item.split('.'): func = getattr(func, part) yield ('.'.join([current_module, item]), func, current_section, current_subsection) previous_line = line class Docstring(object): def __init__(self, name): self.name = name obj = self._load_obj(name) self.obj = obj self.code_obj = self._to_original_callable(obj) self.raw_doc = obj.__doc__ or '' self.clean_doc = pydoc.getdoc(obj) self.doc = NumpyDocString(self.clean_doc) def __len__(self): return len(self.raw_doc) @staticmethod def _load_obj(name): for maxsplit in range(1, name.count('.') + 1): # TODO when py3 only replace by: module, *func_parts = ... 
func_name_split = name.rsplit('.', maxsplit) module = func_name_split[0] func_parts = func_name_split[1:] try: obj = importlib.import_module(module) except ImportError: pass else: continue if 'obj' not in locals(): raise ImportError('No module can be imported ' 'from "{}"'.format(name)) for part in func_parts: obj = getattr(obj, part) return obj @staticmethod def _to_original_callable(obj): while True: if inspect.isfunction(obj) or inspect.isclass(obj): f = inspect.getfile(obj) if f.startswith('<') and f.endswith('>'): return None return obj if inspect.ismethod(obj): obj = obj.__func__ elif isinstance(obj, functools.partial): obj = obj.func elif isinstance(obj, property): obj = obj.fget else: return None @property def type(self): return type(self.obj).__name__ @property def is_function_or_method(self): # TODO(py27): remove ismethod return (inspect.isfunction(self.obj) or inspect.ismethod(self.obj)) @property def source_file_name(self): try: fname = inspect.getsourcefile(self.code_obj) except TypeError: # In some cases the object is something complex like a cython # object that can't be easily introspected. An it's better to # return the source code file of the object as None, than crash pass else: if fname: fname = os.path.relpath(fname, BASE_PATH) return fname @property def source_file_def_line(self): try: return inspect.getsourcelines(self.code_obj)[-1] except (OSError, TypeError): # In some cases the object is something complex like a cython # object that can't be easily introspected. 
An it's better to # return the line number as None, than crash pass @property def github_url(self): url = 'https://github.com/pandas-dev/pandas/blob/master/' url += '{} self.source_file_def_line) return url @property def start_blank_lines(self): i = None if self.raw_doc: for i, row in enumerate(self.raw_doc.split('\n')): if row.strip(): break return i @property def end_blank_lines(self): i = None if self.raw_doc: for i, row in enumerate(reversed(self.raw_doc.split('\n'))): if row.strip(): break return i @property def double_blank_lines(self): prev = True for row in self.raw_doc.split('\n'): if not prev and not row.strip(): return True prev = row.strip() return False @property def section_titles(self): sections = [] self.doc._doc.reset() while not self.doc._doc.eof(): content = self.doc._read_to_next_section() if (len(content) > 1 and len(content[0]) == len(content[1]) and set(content[1]) == {'-'}): sections.append(content[0]) return sections @property def summary(self): return ' '.join(self.doc['Summary']) @property def num_summary_lines(self): return len(self.doc['Summary']) @property def extended_summary(self): if not self.doc['Extended Summary'] and len(self.doc['Summary']) > 1: return ' '.join(self.doc['Summary']) return ' '.join(self.doc['Extended Summary']) @property def needs_summary(self): return not (bool(self.summary) and bool(self.extended_summary)) @property def doc_parameters(self): return collections.OrderedDict((name, (type_, ''.join(desc))) for name, type_, desc in self.doc['Parameters']) @property def signature_parameters(self): if inspect.isclass(self.obj): if hasattr(self.obj, '_accessors') and ( self.name.split('.')[-1] in self.obj._accessors): # accessor classes have a signature but don't want to show this return tuple() try: sig = signature(self.obj) except (TypeError, ValueError): return tuple() params = sig.args if sig.varargs: params.append("*" + sig.varargs) if sig.keywords: params.append("**" + sig.keywords) params = tuple(params) if 
params and params[0] in ('self', 'cls'): return params[1:] return params @property def parameter_mismatches(self): errs = [] signature_params = self.signature_parameters doc_params = tuple(self.doc_parameters) missing = set(signature_params) - set(doc_params) if missing: errs.append(error('PR01', missing_params=pprint_thing(missing))) extra = set(doc_params) - set(signature_params) if extra: errs.append(error('PR02', unknown_params=pprint_thing(extra))) if (not missing and not extra and signature_params != doc_params and not (not signature_params and not doc_params)): errs.append(error('PR03', actual_params=signature_params, documented_params=doc_params)) return errs @property def correct_parameters(self): return not bool(self.parameter_mismatches) def parameter_type(self, param): return self.doc_parameters[param][0] def parameter_desc(self, param): desc = self.doc_parameters[param][1] for directive in DIRECTIVES: full_directive = '.. {}'.format(directive) if full_directive in desc: desc = desc[:desc.index(full_directive)] return desc @property def see_also(self): return collections.OrderedDict((name, ''.join(desc)) for name, desc, _ in self.doc['See Also']) @property def examples(self): return self.doc['Examples'] @property def returns(self): return self.doc['Returns'] @property def yields(self): return self.doc['Yields'] @property def method_source(self): try: source = inspect.getsource(self.obj) except TypeError: return '' return textwrap.dedent(source) @property def method_returns_something(self): def get_returns_not_on_nested_functions(node): returns = [node] if isinstance(node, ast.Return) else [] for child in ast.iter_child_nodes(node): if not isinstance(child, ast.FunctionDef): child_returns = get_returns_not_on_nested_functions(child) returns.extend(child_returns) return returns tree = ast.parse(self.method_source).body if tree: returns = get_returns_not_on_nested_functions(tree[0]) return_values = [r.value for r in returns] for i, v in 
enumerate(return_values): if isinstance(v, ast.NameConstant) and v.value is None: return_values[i] = None return any(return_values) else: return False @property def first_line_ends_in_dot(self): if self.doc: return self.doc.split('\n')[0][-1] == '.' @property def deprecated_with_directive(self): return '.. deprecated:: ' in (self.summary + self.extended_summary) @property def deprecated(self): return (self.name.startswith('pandas.Panel') or self.deprecated_with_directive) @property def mentioned_private_classes(self): return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc] @property def examples_errors(self): flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL finder = doctest.DocTestFinder() runner = doctest.DocTestRunner(optionflags=flags) context = {'np': numpy, 'pd': pandas} error_msgs = '' for test in finder.find(self.raw_doc, self.name, globs=context): f = StringIO() runner.run(test, out=f.write) error_msgs += f.getvalue() return error_msgs @property def examples_source_code(self): lines = doctest.DocTestParser().get_examples(self.raw_doc) return [line.source for line in lines] def validate_pep8(self): if not self.examples: return content = ''.join(('import numpy as np # noqa: F401\n', 'import pandas as pd # noqa: F401\n', *self.examples_source_code)) application = flake8.main.application.Application() application.initialize(["--quiet"]) with tempfile.NamedTemporaryFile(mode='w') as file: file.write(content) file.flush() application.run_checks([file.name]) application.formatter.write = lambda line, source: None application.report() yield from application.guide.stats.statistics_for('') def get_validation_data(doc): errs = [] wrns = [] if not doc.raw_doc: errs.append(error('GL08')) return errs, wrns, '' if doc.start_blank_lines != 1: errs.append(error('GL01')) if doc.end_blank_lines != 1: errs.append(error('GL02')) if doc.double_blank_lines: errs.append(error('GL03')) mentioned_errs = doc.mentioned_private_classes if 
mentioned_errs: errs.append(error('GL04', mentioned_private_classes=', '.join(mentioned_errs))) for line in doc.raw_doc.splitlines(): if re.match("^ *\t", line): errs.append(error('GL05', line_with_tabs=line.lstrip())) unexpected_sections = [section for section in doc.section_titles if section not in ALLOWED_SECTIONS] for section in unexpected_sections: errs.append(error('GL06', section=section, allowed_sections=', '.join(ALLOWED_SECTIONS))) correct_order = [section for section in ALLOWED_SECTIONS if section in doc.section_titles] if correct_order != doc.section_titles: errs.append(error('GL07', correct_sections=', '.join(correct_order))) if (doc.deprecated_with_directive and not doc.extended_summary.startswith('.. deprecated:: ')): errs.append(error('GL09')) if not doc.summary: errs.append(error('SS01')) else: if not doc.summary[0].isupper(): errs.append(error('SS02')) if doc.summary[-1] != '.': errs.append(error('SS03')) if doc.summary != doc.summary.lstrip(): errs.append(error('SS04')) elif (doc.is_function_or_method and doc.summary.split(' ')[0][-1] == 's'): errs.append(error('SS05')) if doc.num_summary_lines > 1: errs.append(error('SS06')) if not doc.extended_summary: wrns.append(('ES01', 'No extended summary found')) errs += doc.parameter_mismatches for param in doc.doc_parameters: if not param.startswith("*"): if not doc.parameter_type(param): if ':' in param: errs.append(error('PR10', param_name=param.split(':')[0])) else: errs.append(error('PR04', param_name=param)) else: if doc.parameter_type(param)[-1] == '.': errs.append(error('PR05', param_name=param)) common_type_errors = [('integer', 'int'), ('boolean', 'bool'), ('string', 'str')] for wrong_type, right_type in common_type_errors: if wrong_type in doc.parameter_type(param): errs.append(error('PR06', param_name=param, right_type=right_type, wrong_type=wrong_type)) if not doc.parameter_desc(param): errs.append(error('PR07', param_name=param)) else: if not doc.parameter_desc(param)[0].isupper(): 
errs.append(error('PR08', param_name=param)) if doc.parameter_desc(param)[-1] != '.': errs.append(error('PR09', param_name=param)) if doc.is_function_or_method: if not doc.returns: if doc.method_returns_something: errs.append(error('RT01')) else: if len(doc.returns) == 1 and doc.returns[0][1]: errs.append(error('RT02')) for name_or_type, type_, desc in doc.returns: if not desc: errs.append(error('RT03')) else: desc = ' '.join(desc) if not desc[0].isupper(): errs.append(error('RT04')) if not desc.endswith('.'): errs.append(error('RT05')) if not doc.yields and 'yield' in doc.method_source: errs.append(error('YD01')) if not doc.see_also: wrns.append(error('SA01')) else: for rel_name, rel_desc in doc.see_also.items(): if rel_desc: if not rel_desc.endswith('.'): errs.append(error('SA02', reference_name=rel_name)) if not rel_desc[0].isupper(): errs.append(error('SA03', reference_name=rel_name)) else: errs.append(error('SA04', reference_name=rel_name)) if rel_name.startswith('pandas.'): errs.append(error('SA05', reference_name=rel_name, right_reference=rel_name[len('pandas.'):])) examples_errs = '' if not doc.examples: wrns.append(error('EX01')) else: examples_errs = doc.examples_errors if examples_errs: errs.append(error('EX02', doctest_log=examples_errs)) for err in doc.validate_pep8(): errs.append(error('EX03', error_code=err.error_code, error_message=err.message, times_happening=' ({} times)'.format(err.count) if err.count > 1 else '')) examples_source_code = ''.join(doc.examples_source_code) for wrong_import in ('numpy', 'pandas'): if 'import {}'.format(wrong_import) in examples_source_code: errs.append(error('EX04', imported_library=wrong_import)) return errs, wrns, examples_errs def validate_one(func_name): doc = Docstring(func_name) errs, wrns, examples_errs = get_validation_data(doc) return {'type': doc.type, 'docstring': doc.clean_doc, 'deprecated': doc.deprecated, 'file': doc.source_file_name, 'file_line': doc.source_file_def_line, 'github_link': 
doc.github_url, 'errors': errs, 'warnings': wrns, 'examples_errors': examples_errs} def validate_all(prefix, ignore_deprecated=False): result = {} seen = {} api_doc_fnames = os.path.join( BASE_PATH, 'doc', 'source', 'reference', '*.rst') api_items = [] for api_doc_fname in glob.glob(api_doc_fnames): with open(api_doc_fname) as f: api_items += list(get_api_items(f)) for func_name, func_obj, section, subsection in api_items: if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info shared_code_key = doc_info['file'], doc_info['file_line'] shared_code = seen.get(shared_code_key, '') result[func_name].update({'in_api': True, 'section': section, 'subsection': subsection, 'shared_code_with': shared_code}) seen[shared_code_key] = func_name api_item_names = set(list(zip(*api_items))[0]) for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel): for member in inspect.getmembers(class_): func_name = 'pandas.{}.{}'.format(class_.__name__, member[0]) if (not member[0].startswith('_') and func_name not in api_item_names): if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info result[func_name]['in_api'] = False return result def main(func_name, prefix, errors, output_format, ignore_deprecated): def header(title, width=80, char='#'): full_line = char * width side_len = (width - len(title) - 2) // 2 adj = '' if len(title) % 2 == 0 else ' ' title_line = '{side} {title}{adj} {side}'.format(side=char * side_len, title=title, adj=adj) return '\n{full_line}\n{title_line}\n{full_line}\n\n'.format( full_line=full_line, title_line=title_line) exit_status = 0 if func_name is None: result = validate_all(prefix, ignore_deprecated) if output_format == 'json': output = json.dumps(result) else: if output_format == 'default': output_format = 
'{text}\n' elif output_format == 'azure': output_format = ('##vso[task.logissue type=error;' 'sourcepath={path};' 'linenumber={row};' 'code={code};' ']{text}\n') else: raise ValueError('Unknown output_format "{}"'.format( output_format)) output = '' for name, res in result.items(): for err_code, err_desc in res['errors']: # initially. But that would complicate the code too much if errors and err_code not in errors: continue exit_status += 1 output += output_format.format( name=name, path=res['file'], row=res['file_line'], code=err_code, text='{}: {}'.format(name, err_desc)) sys.stdout.write(output) else: result = validate_one(func_name) sys.stderr.write(header('Docstring ({})'.format(func_name))) sys.stderr.write('{}\n'.format(result['docstring'])) sys.stderr.write(header('Validation')) if result['errors']: sys.stderr.write('{} Errors found:\n'.format( len(result['errors']))) for err_code, err_desc in result['errors']: # Failing examples are printed at the end if err_code == 'EX02': sys.stderr.write('\tExamples do not pass tests\n') continue sys.stderr.write('\t{}\n'.format(err_desc)) if result['warnings']: sys.stderr.write('{} Warnings found:\n'.format( len(result['warnings']))) for wrn_code, wrn_desc in result['warnings']: sys.stderr.write('\t{}\n'.format(wrn_desc)) if not result['errors']: sys.stderr.write('Docstring for "{}" correct. :)\n'.format( func_name)) if result['examples_errors']: sys.stderr.write(header('Doctests')) sys.stderr.write(result['examples_errors']) return exit_status if __name__ == '__main__': format_opts = 'default', 'json', 'azure' func_help = ('function or method to validate (e.g. 
pandas.DataFrame.head) ' 'if not provided, all docstrings are validated and returned ' 'as JSON') argparser = argparse.ArgumentParser( description='validate pandas docstrings') argparser.add_argument('function', nargs='?', default=None, help=func_help) argparser.add_argument('--format', default='default', choices=format_opts, help='format of the output when validating ' 'multiple docstrings (ignored when validating one).' 'It can be {}'.format(str(format_opts)[1:-1])) argparser.add_argument('--prefix', default=None, help='pattern for the ' 'docstring names, in order to decide which ones ' 'will be validated. A prefix "pandas.Series.str.' 'will make the script validate all the docstrings' 'of methods starting by this pattern. It is ' 'ignored if parameter function is provided') argparser.add_argument('--errors', default=None, help='comma separated ' 'list of error codes to validate. By default it ' 'validates all errors (ignored when validating ' 'a single docstring)') argparser.add_argument('--ignore_deprecated', default=False, action='store_true', help='if this flag is set, ' 'deprecated objects are ignored when validating ' 'all docstrings') args = argparser.parse_args() sys.exit(main(args.function, args.prefix, args.errors.split(',') if args.errors else None, args.format, args.ignore_deprecated))
true
true
1c45c79cdc783d17fc365a898f4c6a3109e9d344
2,508
py
Python
api_app/models/schemas/workspace.py
tanya-borisova/AzureTRE
02e1745785a75a7dc676d9b9853ae4d4de7d87af
[ "MIT" ]
null
null
null
api_app/models/schemas/workspace.py
tanya-borisova/AzureTRE
02e1745785a75a7dc676d9b9853ae4d4de7d87af
[ "MIT" ]
1
2022-02-02T14:52:06.000Z
2022-02-02T15:00:01.000Z
api_app/models/schemas/workspace.py
tanya-borisova/AzureTRE
02e1745785a75a7dc676d9b9853ae4d4de7d87af
[ "MIT" ]
null
null
null
from enum import Enum from typing import List from pydantic import BaseModel, Field from models.domain.resource import ResourceType from models.domain.workspace import Workspace def get_sample_workspace(workspace_id: str, spec_workspace_id: str = "0001") -> dict: return { "id": workspace_id, "isActive": True, "templateName": "tre-workspace-base", "templateVersion": "0.1.0", "properties": { "azure_location": "westeurope", "workspace_id": spec_workspace_id, "tre_id": "mytre-dev-1234", "address_space_size": "small", }, "resourceType": ResourceType.Workspace, "workspaceURL": "", "authInformation": {} } class AuthProvider(str, Enum): """ Auth Provider """ AAD = "AAD" class AuthenticationConfiguration(BaseModel): provider: AuthProvider = Field(AuthProvider.AAD, title="Authentication Provider") data: dict = Field({}, title="Authentication information") class WorkspaceInResponse(BaseModel): workspace: Workspace class Config: schema_extra = { "example": { "workspace": get_sample_workspace("933ad738-7265-4b5f-9eae-a1a62928772e") } } class WorkspacesInList(BaseModel): workspaces: List[Workspace] class Config: schema_extra = { "example": { "workspaces": [ get_sample_workspace("933ad738-7265-4b5f-9eae-a1a62928772e", "0001"), get_sample_workspace("2fdc9fba-726e-4db6-a1b8-9018a2165748", "0002"), ] } } class WorkspaceInCreate(BaseModel): templateName: str = Field(title="Workspace type", description="Bundle name") properties: dict = Field({}, title="Workspace parameters", description="Values for the parameters required by the workspace resource specification") class Config: schema_extra = { "example": { "templateName": "tre-workspace-base", "properties": { "display_name": "the workspace display name", "description": "workspace description", "app_id": "9d52b04f-89cf-47b4-868a-e12be7133b36" } } } class WorkspacePatchEnabled(BaseModel): enabled: bool class Config: schema_extra = { "example": { "enabled": False } }
27.56044
152
0.585726
from enum import Enum from typing import List from pydantic import BaseModel, Field from models.domain.resource import ResourceType from models.domain.workspace import Workspace def get_sample_workspace(workspace_id: str, spec_workspace_id: str = "0001") -> dict: return { "id": workspace_id, "isActive": True, "templateName": "tre-workspace-base", "templateVersion": "0.1.0", "properties": { "azure_location": "westeurope", "workspace_id": spec_workspace_id, "tre_id": "mytre-dev-1234", "address_space_size": "small", }, "resourceType": ResourceType.Workspace, "workspaceURL": "", "authInformation": {} } class AuthProvider(str, Enum): AAD = "AAD" class AuthenticationConfiguration(BaseModel): provider: AuthProvider = Field(AuthProvider.AAD, title="Authentication Provider") data: dict = Field({}, title="Authentication information") class WorkspaceInResponse(BaseModel): workspace: Workspace class Config: schema_extra = { "example": { "workspace": get_sample_workspace("933ad738-7265-4b5f-9eae-a1a62928772e") } } class WorkspacesInList(BaseModel): workspaces: List[Workspace] class Config: schema_extra = { "example": { "workspaces": [ get_sample_workspace("933ad738-7265-4b5f-9eae-a1a62928772e", "0001"), get_sample_workspace("2fdc9fba-726e-4db6-a1b8-9018a2165748", "0002"), ] } } class WorkspaceInCreate(BaseModel): templateName: str = Field(title="Workspace type", description="Bundle name") properties: dict = Field({}, title="Workspace parameters", description="Values for the parameters required by the workspace resource specification") class Config: schema_extra = { "example": { "templateName": "tre-workspace-base", "properties": { "display_name": "the workspace display name", "description": "workspace description", "app_id": "9d52b04f-89cf-47b4-868a-e12be7133b36" } } } class WorkspacePatchEnabled(BaseModel): enabled: bool class Config: schema_extra = { "example": { "enabled": False } }
true
true
1c45c8088030d2b6425eb6a785a0705fba310bdf
694
py
Python
Menu/BaseScripts/updateBlock.py
fortiersteven/Narikiri-Dungeon-X
49e5716fa5aa81a25048bcbe212eb74828cf0e10
[ "MIT" ]
10
2021-06-04T10:17:48.000Z
2022-01-23T13:23:37.000Z
Menu/BaseScripts/updateBlock.py
fortiersteven/Narikiri-Dungeon-X
49e5716fa5aa81a25048bcbe212eb74828cf0e10
[ "MIT" ]
1
2021-06-05T17:05:04.000Z
2021-06-05T17:05:04.000Z
Menu/BaseScripts/updateBlock.py
fortiersteven/Narikiri-Dungeon-X
49e5716fa5aa81a25048bcbe212eb74828cf0e10
[ "MIT" ]
4
2021-05-21T11:21:04.000Z
2022-01-06T18:50:12.000Z
from HelperfunctionsNew import * import sys import os if __name__ == "__main__": blockDesc = sys.argv[1] helper = Helper() herlper.get if blockDesc in ["Skit Name", "Synopsis", "Minigame"]: helper.createBlock_Multi(blockDesc) elif blockDesc != "All": print("Create the script based on google sheet") helper.createAtlasScript_Block(blockDesc) print("Create the SLPS for this block") helper.reinsertText_Block(blockDesc) else: helper.createAtlasScript_All() print("Create the SLPS for this block") helper.reinsertText_All(blockDesc)
23.133333
58
0.602305
from HelperfunctionsNew import * import sys import os if __name__ == "__main__": blockDesc = sys.argv[1] helper = Helper() herlper.get if blockDesc in ["Skit Name", "Synopsis", "Minigame"]: helper.createBlock_Multi(blockDesc) elif blockDesc != "All": print("Create the script based on google sheet") helper.createAtlasScript_Block(blockDesc) print("Create the SLPS for this block") helper.reinsertText_Block(blockDesc) else: helper.createAtlasScript_All() print("Create the SLPS for this block") helper.reinsertText_All(blockDesc)
true
true
1c45c86d301eb86719539dd517c54c2d7968b0d2
2,061
py
Python
sdk/python/pulumi_aws/__init__.py
Charliekenney23/pulumi-aws
55bd0390160d27350b297834026fee52114a2d41
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/__init__.py
Charliekenney23/pulumi-aws
55bd0390160d27350b297834026fee52114a2d41
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/__init__.py
Charliekenney23/pulumi-aws
55bd0390160d27350b297834026fee52114a2d41
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import importlib # Make subpackages available: __all__ = ['acm', 'acmpca', 'apigateway', 'appautoscaling', 'applicationloadbalancing', 'appmesh', 'appsync', 'athena', 'autoscaling', 'backup', 'batch', 'budgets', 'cfg', 'cloud9', 'cloudformation', 'cloudfront', 'cloudhsmv2', 'cloudtrail', 'cloudwatch', 'codebuild', 'codecommit', 'codedeploy', 'codepipeline', 'cognito', 'config', 'cur', 'datasync', 'dax', 'devicefarm', 'directconnect', 'directoryservice', 'dlm', 'dms', 'docdb', 'dynamodb', 'ebs', 'ec2', 'ec2clientvpn', 'ec2transitgateway', 'ecr', 'ecs', 'efs', 'eks', 'elasticache', 'elasticbeanstalk', 'elasticloadbalancing', 'elasticloadbalancingv2', 'elasticsearch', 'elastictranscoder', 'emr', 'gamelift', 'glacier', 'globalaccelerator', 'glue', 'guardduty', 'iam', 'inspector', 'iot', 'kinesis', 'kms', 'lambda_', 'licensemanager', 'lightsail', 'macie', 'mediapackage', 'mediastore', 'mq', 'msk', 'neptune', 'opsworks', 'organizations', 'pinpoint', 'pricing', 'ram', 'rds', 'redshift', 'resourcegroups', 'route53', 's3', 'sagemaker', 'secretsmanager', 'securityhub', 'servicecatalog', 'servicediscovery', 'ses', 'sfn', 'shield', 'simpledb', 'sns', 'sqs', 'ssm', 'storagegateway', 'swf', 'transfer', 'waf', 'wafregional', 'worklink', 'workspaces', 'xray'] for pkg in __all__: if pkg != 'config': importlib.import_module(f'{__name__}.{pkg}') # Export this package's modules as members: from .get_ami import * from .get_ami_ids import * from .get_arn import * from .get_autoscaling_groups import * from .get_availability_zone import * from .get_availability_zones import * from .get_billing_service_account import * from .get_caller_identity import * from .get_canonical_user_id import * from .get_elastic_ip import * from .get_ip_ranges import * from .get_partition import * from .get_prefix_list import * 
from .get_region import * from .provider import *
73.607143
1,216
0.706938
import importlib # Make subpackages available: __all__ = ['acm', 'acmpca', 'apigateway', 'appautoscaling', 'applicationloadbalancing', 'appmesh', 'appsync', 'athena', 'autoscaling', 'backup', 'batch', 'budgets', 'cfg', 'cloud9', 'cloudformation', 'cloudfront', 'cloudhsmv2', 'cloudtrail', 'cloudwatch', 'codebuild', 'codecommit', 'codedeploy', 'codepipeline', 'cognito', 'config', 'cur', 'datasync', 'dax', 'devicefarm', 'directconnect', 'directoryservice', 'dlm', 'dms', 'docdb', 'dynamodb', 'ebs', 'ec2', 'ec2clientvpn', 'ec2transitgateway', 'ecr', 'ecs', 'efs', 'eks', 'elasticache', 'elasticbeanstalk', 'elasticloadbalancing', 'elasticloadbalancingv2', 'elasticsearch', 'elastictranscoder', 'emr', 'gamelift', 'glacier', 'globalaccelerator', 'glue', 'guardduty', 'iam', 'inspector', 'iot', 'kinesis', 'kms', 'lambda_', 'licensemanager', 'lightsail', 'macie', 'mediapackage', 'mediastore', 'mq', 'msk', 'neptune', 'opsworks', 'organizations', 'pinpoint', 'pricing', 'ram', 'rds', 'redshift', 'resourcegroups', 'route53', 's3', 'sagemaker', 'secretsmanager', 'securityhub', 'servicecatalog', 'servicediscovery', 'ses', 'sfn', 'shield', 'simpledb', 'sns', 'sqs', 'ssm', 'storagegateway', 'swf', 'transfer', 'waf', 'wafregional', 'worklink', 'workspaces', 'xray'] for pkg in __all__: if pkg != 'config': importlib.import_module(f'{__name__}.{pkg}') # Export this package's modules as members: from .get_ami import * from .get_ami_ids import * from .get_arn import * from .get_autoscaling_groups import * from .get_availability_zone import * from .get_availability_zones import * from .get_billing_service_account import * from .get_caller_identity import * from .get_canonical_user_id import * from .get_elastic_ip import * from .get_ip_ranges import * from .get_partition import * from .get_prefix_list import * from .get_region import * from .provider import *
true
true
1c45c9928167414ac58f3e156afea4d5426540e6
122
py
Python
ecommerce/api/category/admin.py
jigyasudhingra/E-commerce-Store-Using-React-And-Django
128e0e3d78dd7aca309c851eff2d02e2452d4d1f
[ "MIT" ]
1
2021-12-04T08:47:29.000Z
2021-12-04T08:47:29.000Z
ecommerce/api/category/admin.py
jigyasudhingra/E-commerce-Store-Using-React-And-Django
128e0e3d78dd7aca309c851eff2d02e2452d4d1f
[ "MIT" ]
null
null
null
ecommerce/api/category/admin.py
jigyasudhingra/E-commerce-Store-Using-React-And-Django
128e0e3d78dd7aca309c851eff2d02e2452d4d1f
[ "MIT" ]
1
2021-05-15T07:23:37.000Z
2021-05-15T07:23:37.000Z
from django.contrib import admin from .models import Category # Register your models here. admin.site.register(Category)
20.333333
32
0.811475
from django.contrib import admin from .models import Category admin.site.register(Category)
true
true
1c45ca09ffeae3aabe2a3a4553e18bbea7714321
595
py
Python
pyaz/eventgrid/topic_type/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/eventgrid/topic_type/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/eventgrid/topic_type/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
1
2022-02-03T09:12:01.000Z
2022-02-03T09:12:01.000Z
from ... pyaz_utils import _call_az def list(): ''' List registered topic types. ''' return _call_az("az eventgrid topic-type list", locals()) def show(name): ''' Get the details for a topic type. Required Parameters: - name -- Name of the topic type. ''' return _call_az("az eventgrid topic-type show", locals()) def list_event_types(name): ''' List the event types supported by a topic type. Required Parameters: - name -- Name of the topic type. ''' return _call_az("az eventgrid topic-type list-event-types", locals())
20.517241
73
0.636975
from ... pyaz_utils import _call_az def list(): return _call_az("az eventgrid topic-type list", locals()) def show(name): return _call_az("az eventgrid topic-type show", locals()) def list_event_types(name): return _call_az("az eventgrid topic-type list-event-types", locals())
true
true
1c45ca0d1bfde8aae6ad6466c099bac46b3121a0
4,702
py
Python
sdk/python/pulumi_aws/iot/thing_principal_attachment.py
sibuthomasmathew/pulumi-aws
6351f2182eb6f693d4e09e4136c385adfa0ab674
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/iot/thing_principal_attachment.py
sibuthomasmathew/pulumi-aws
6351f2182eb6f693d4e09e4136c385adfa0ab674
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/iot/thing_principal_attachment.py
sibuthomasmathew/pulumi-aws
6351f2182eb6f693d4e09e4136c385adfa0ab674
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables __all__ = ['ThingPrincipalAttachment'] class ThingPrincipalAttachment(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, principal: Optional[pulumi.Input[str]] = None, thing: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Attaches Principal to AWS IoT Thing. ## Example Usage ```python import pulumi import pulumi_aws as aws example = aws.iot.Thing("example") cert = aws.iot.Certificate("cert", csr=(lambda path: open(path).read())("csr.pem"), active=True) att = aws.iot.ThingPrincipalAttachment("att", principal=cert.arn, thing=example.name) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] principal: The AWS IoT Certificate ARN or Amazon Cognito Identity ID. :param pulumi.Input[str] thing: The name of the thing. 
""" if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if principal is None and not opts.urn: raise TypeError("Missing required property 'principal'") __props__['principal'] = principal if thing is None and not opts.urn: raise TypeError("Missing required property 'thing'") __props__['thing'] = thing super(ThingPrincipalAttachment, __self__).__init__( 'aws:iot/thingPrincipalAttachment:ThingPrincipalAttachment', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, principal: Optional[pulumi.Input[str]] = None, thing: Optional[pulumi.Input[str]] = None) -> 'ThingPrincipalAttachment': """ Get an existing ThingPrincipalAttachment resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] principal: The AWS IoT Certificate ARN or Amazon Cognito Identity ID. :param pulumi.Input[str] thing: The name of the thing. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["principal"] = principal __props__["thing"] = thing return ThingPrincipalAttachment(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def principal(self) -> pulumi.Output[str]: """ The AWS IoT Certificate ARN or Amazon Cognito Identity ID. """ return pulumi.get(self, "principal") @property @pulumi.getter def thing(self) -> pulumi.Output[str]: """ The name of the thing. """ return pulumi.get(self, "thing") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
38.859504
134
0.630795
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables __all__ = ['ThingPrincipalAttachment'] class ThingPrincipalAttachment(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, principal: Optional[pulumi.Input[str]] = None, thing: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if principal is None and not opts.urn: raise TypeError("Missing required property 'principal'") __props__['principal'] = principal if thing is None and not opts.urn: raise TypeError("Missing required property 'thing'") __props__['thing'] = thing super(ThingPrincipalAttachment, __self__).__init__( 'aws:iot/thingPrincipalAttachment:ThingPrincipalAttachment', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, principal: Optional[pulumi.Input[str]] = None, thing: Optional[pulumi.Input[str]] = None) -> 'ThingPrincipalAttachment': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["principal"] = principal __props__["thing"] = thing return ThingPrincipalAttachment(resource_name, opts=opts, 
__props__=__props__) @property @pulumi.getter def principal(self) -> pulumi.Output[str]: return pulumi.get(self, "principal") @property @pulumi.getter def thing(self) -> pulumi.Output[str]: return pulumi.get(self, "thing") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
true
true
1c45cb82a5676f74fee689944bd273eb30fc85e0
4,151
py
Python
Tests/Plot/LamWind/test_Slot_LSRPM_plot.py
tobsen2code/pyleecan
5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9
[ "Apache-2.0" ]
95
2019-01-23T04:19:45.000Z
2022-03-17T18:22:10.000Z
Tests/Plot/LamWind/test_Slot_LSRPM_plot.py
Eomys/Pyleecan
4d7f0cbabf0311006963e7a2f435db2ecd901118
[ "Apache-2.0" ]
366
2019-02-20T07:15:08.000Z
2022-03-31T13:37:23.000Z
Tests/Plot/LamWind/test_Slot_LSRPM_plot.py
Eomys/Pyleecan
4d7f0cbabf0311006963e7a2f435db2ecd901118
[ "Apache-2.0" ]
74
2019-01-24T01:47:31.000Z
2022-02-25T05:44:42.000Z
# -*- coding: utf-8 -*- from os.path import join import pytest import matplotlib.pyplot as plt from numpy import array, pi, zeros from pyleecan.Classes.Frame import Frame from pyleecan.Classes.LamSlotWind import LamSlotWind from pyleecan.Classes.LamSquirrelCage import LamSquirrelCage from pyleecan.Classes.MachineDFIM import MachineDFIM from pyleecan.Classes.Shaft import Shaft from pyleecan.Classes.VentilationCirc import VentilationCirc from pyleecan.Classes.VentilationPolar import VentilationPolar from pyleecan.Classes.VentilationTrap import VentilationTrap from pyleecan.Classes.Winding import Winding from pyleecan.Classes.WindingUD import WindingUD from pyleecan.Classes.SlotWLSRPM import SlotWLSRPM from Tests import save_plot_path as save_path # from Tests.Plot.LamWind import wind_mat, wind_mat2 """unittest for Lamination with winding plot""" @pytest.fixture def machine(): """Run at the begining of every test to setup the machine""" plt.close("all") test_obj = LamSlotWind( Rint=50.7e-3, Rext=72.5e-3, is_internal=False, is_stator=True, L1=0.95, Nrvd=0, Wrvd=0, ) test_obj.slot = SlotWLSRPM( Zs=12, W1=8e-3, W3=11.6e-3, H2=14.8e-3, R1=0.75e-3, H3=2e-3 ) return test_obj # wind_mat = zeros((2, 2, 6, 4)) # Nrad, Ntan, Zs, qs # wind_mat[0, 0, :, :] = array( # [[1, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, -1, -1, 0], [0, -1, 0, 0, 0, 1]] # ).T # wind_mat[1, 0, :, :] = array( # [[0, 0, 0, 0, 0, 0], [-1, 0, -1, 0, 0, -1], [0, 0, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0]] # ).T # wind_mat[0, 1, :, :] = array( # [[-1, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, -1, 0, 0, -1]] # ).T # wind_mat[1, 1, :, :] = array( # [[0, 0, 0, -1, -1, 0], [1, 0, 0, 0, 0, 1], [0, -1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]] # ).T ### wind_mat_LSRPM = zeros((2, 2, 12, 6)) # Nrad, Ntan, Zs, qs wind_mat_LSRPM[0, 0, :, :] = array( [ [-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0], [0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0], [0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ).T wind_mat_LSRPM[1, 0, :, :] = array( [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0], [0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0], [0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1], ] ).T wind_mat_LSRPM[0, 1, :, :] = array( [ [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ).T wind_mat_LSRPM[1, 1, :, :] = array( [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], ] ).T def test_Lam_Wind_LSRPM_wind_tan(machine): """Test machine plot with Slot LSRPM and winding rad=1, tan=2""" machine.winding = WindingUD(wind_mat=wind_mat_LSRPM, qs=6, p=4, Lewout=0) machine.plot(is_show_fig=False) fig = plt.gcf() fig.savefig(join(save_path, "test_Lam_Wind_sLSRPM_2-tan-wind.png")) # 2 for lam + Zs*2 for wind # assert len(fig.axes[0].patches) == 26 def test_stator_slot_angle_opening(machine): """Test calculate the angle opening""" machine.slot.comp_angle_opening() def test_stator_slot_height_damper(machine): """Test calculate the damper height""" machine.slot.comp_height_damper() def test_stator_slot_height_wind(machine): """Test calculate the winding height""" machine.slot.comp_height_wind() def test_stator_slot_height(machine): """Test calculate the total height""" machine.slot.comp_height()
29.863309
89
0.530956
from os.path import join import pytest import matplotlib.pyplot as plt from numpy import array, pi, zeros from pyleecan.Classes.Frame import Frame from pyleecan.Classes.LamSlotWind import LamSlotWind from pyleecan.Classes.LamSquirrelCage import LamSquirrelCage from pyleecan.Classes.MachineDFIM import MachineDFIM from pyleecan.Classes.Shaft import Shaft from pyleecan.Classes.VentilationCirc import VentilationCirc from pyleecan.Classes.VentilationPolar import VentilationPolar from pyleecan.Classes.VentilationTrap import VentilationTrap from pyleecan.Classes.Winding import Winding from pyleecan.Classes.WindingUD import WindingUD from pyleecan.Classes.SlotWLSRPM import SlotWLSRPM from Tests import save_plot_path as save_path @pytest.fixture def machine(): plt.close("all") test_obj = LamSlotWind( Rint=50.7e-3, Rext=72.5e-3, is_internal=False, is_stator=True, L1=0.95, Nrvd=0, Wrvd=0, ) test_obj.slot = SlotWLSRPM( Zs=12, W1=8e-3, W3=11.6e-3, H2=14.8e-3, R1=0.75e-3, H3=2e-3 ) return test_obj at_LSRPM = zeros((2, 2, 12, 6)) wind_mat_LSRPM[0, 0, :, :] = array( [ [-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0], [0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0], [0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ).T wind_mat_LSRPM[1, 0, :, :] = array( [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0], [0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0], [0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1], ] ).T wind_mat_LSRPM[0, 1, :, :] = array( [ [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ).T wind_mat_LSRPM[1, 1, :, :] = array( [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], ] ).T def test_Lam_Wind_LSRPM_wind_tan(machine): machine.winding = WindingUD(wind_mat=wind_mat_LSRPM, qs=6, p=4, Lewout=0) machine.plot(is_show_fig=False) fig = plt.gcf() fig.savefig(join(save_path, "test_Lam_Wind_sLSRPM_2-tan-wind.png")) def test_stator_slot_angle_opening(machine): machine.slot.comp_angle_opening() def test_stator_slot_height_damper(machine): machine.slot.comp_height_damper() def test_stator_slot_height_wind(machine): machine.slot.comp_height_wind() def test_stator_slot_height(machine): machine.slot.comp_height()
true
true
1c45cc01daf7a254c2fe16fed376b3fc58df574f
1,643
py
Python
src/nucleotide/component/windows/msvc/atom/version.py
dmilos/nucleotide
aad5d60508c9e4baf4888069284f2cb5c9fd7c55
[ "Apache-2.0" ]
1
2020-09-04T13:00:04.000Z
2020-09-04T13:00:04.000Z
src/nucleotide/component/windows/msvc/atom/version.py
dmilos/nucleotide
aad5d60508c9e4baf4888069284f2cb5c9fd7c55
[ "Apache-2.0" ]
1
2020-04-10T01:52:32.000Z
2020-04-10T09:11:29.000Z
src/nucleotide/component/windows/msvc/atom/version.py
dmilos/nucleotide
aad5d60508c9e4baf4888069284f2cb5c9fd7c55
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python2 # Copyright 2015 Dejan D. M. Milosavljevic # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import nucleotide import nucleotide.component import nucleotide.component.function def _windows_version_MSVC_VERSION( P_data ): if( False == ( 'msvc' in P_data ) ): return None print( " ||" + str( P_data ) + "||" ) return P_data[ 'msvc' ][0] atom_windows_CCVERSION = { 'platform' : { 'host' : 'Windows', 'guest' : 'Windows' }, 'cc' : { 'vendor': 'Microsoft', 'name': 'msvc', 'version': 'X' }, 'config' : { 'MSVC_VERSION' : _windows_version_MSVC_VERSION }, 'name' :'compiler:version', 'class': [ 'compiler:version', 'windows:compiler:version' ] } class Version: def __init__(self): pass @staticmethod def extend(P_option): nucleotide.component.function.extend( P_option, 'windows:compiler:version', atom_windows_CCVERSION ) @staticmethod def check(self): pass
27.847458
109
0.625685
import os import subprocess import nucleotide import nucleotide.component import nucleotide.component.function def _windows_version_MSVC_VERSION( P_data ): if( False == ( 'msvc' in P_data ) ): return None print( " ||" + str( P_data ) + "||" ) return P_data[ 'msvc' ][0] atom_windows_CCVERSION = { 'platform' : { 'host' : 'Windows', 'guest' : 'Windows' }, 'cc' : { 'vendor': 'Microsoft', 'name': 'msvc', 'version': 'X' }, 'config' : { 'MSVC_VERSION' : _windows_version_MSVC_VERSION }, 'name' :'compiler:version', 'class': [ 'compiler:version', 'windows:compiler:version' ] } class Version: def __init__(self): pass @staticmethod def extend(P_option): nucleotide.component.function.extend( P_option, 'windows:compiler:version', atom_windows_CCVERSION ) @staticmethod def check(self): pass
true
true
1c45ccb173f55ce3b5f37cb85aa9fc13c1fdd831
791
py
Python
david/modules/event/admin.py
ktmud/david
4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f
[ "MIT" ]
2
2016-04-07T08:21:32.000Z
2020-11-26T11:49:20.000Z
david/modules/event/admin.py
ktmud/david
4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f
[ "MIT" ]
null
null
null
david/modules/event/admin.py
ktmud/david
4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from david.core.article.admin import ArticleAdmin, ModelAdmin from david.ext.admin import _ from .model import Event class EventAdmin(ArticleAdmin): column_labels = dict( title=_('Title'), slug=_('Slug'), id=_('ID'), tags=_('Tags'), create_at=_('Create at'), update_at=_('Update at'), summary=_('Summary'), link=_('Out link'), content=_('Content') ) column_list = ('id', 'title', 'create_at') column_sortable_list = ('id', 'title') form_columns = ('title', 'content', 'link', 'create_at',) form_widget_args = dict( link=dict(placeholder='http://...') ) views = [ (EventAdmin(Event, name=_('Event')), 20) ]
27.275862
61
0.542351
from david.core.article.admin import ArticleAdmin, ModelAdmin from david.ext.admin import _ from .model import Event class EventAdmin(ArticleAdmin): column_labels = dict( title=_('Title'), slug=_('Slug'), id=_('ID'), tags=_('Tags'), create_at=_('Create at'), update_at=_('Update at'), summary=_('Summary'), link=_('Out link'), content=_('Content') ) column_list = ('id', 'title', 'create_at') column_sortable_list = ('id', 'title') form_columns = ('title', 'content', 'link', 'create_at',) form_widget_args = dict( link=dict(placeholder='http://...') ) views = [ (EventAdmin(Event, name=_('Event')), 20) ]
true
true
1c45ccf0ef4a8a26e47030e104f785132e53c97d
31,133
py
Python
flexmock_test.py
sagara-/flexmock
0b24b769cd04e234d4921089053707a5565aa007
[ "BSD-2-Clause" ]
null
null
null
flexmock_test.py
sagara-/flexmock
0b24b769cd04e234d4921089053707a5565aa007
[ "BSD-2-Clause" ]
null
null
null
flexmock_test.py
sagara-/flexmock
0b24b769cd04e234d4921089053707a5565aa007
[ "BSD-2-Clause" ]
null
null
null
#-*- coding: utf8 -*- from flexmock import FlexMock from flexmock import AlreadyMocked from flexmock import AndExecuteNotSupportedForClassMocks from flexmock import AttemptingToMockBuiltin from flexmock import Expectation from flexmock import FlexmockContainer from flexmock import FlexmockException from flexmock import InvalidMethodSignature from flexmock import InvalidExceptionClass from flexmock import InvalidExceptionMessage from flexmock import MethodDoesNotExist from flexmock import MethodNotCalled from flexmock import MethodCalledOutOfOrder from flexmock import ReturnValue from flexmock import flexmock from flexmock import flexmock_nose from flexmock import _format_args import sys import unittest def module_level_function(some, args): return "%s, %s" % (some, args) def _tear_down(runner): return unittest.TestCase.tearDown(runner) def assertRaises(exception, method, *kargs, **kwargs): try: method(*kargs, **kwargs) except exception: assert True return except: pass raise Exception('%s not raised' % exception.__name__) class TestFlexmock(unittest.TestCase): def test_flexmock_should_create_mock_object(self): mock = flexmock() assert isinstance(mock, FlexMock) def test_flexmock_should_create_mock_object_from_dict(self): mock = flexmock(foo='foo', bar='bar') assert 'foo' == mock.foo assert 'bar' == mock.bar def test_flexmock_should_add_expectations(self): mock = flexmock(name='temp') mock.should_receive('method_foo') assert 'method_foo' in [x.method for x in mock._flexmock_expectations] def test_flexmock_should_return_value(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar') mock.should_receive('method_bar').and_return('value_baz') assert 'value_bar' == mock.method_foo() assert 'value_baz' == mock.method_bar() def test_flexmock_should_accept_shortcuts_for_creating_mock_object(self): mock = flexmock(attr1='value 1', attr2=lambda: 'returning 2') assert 'value 1' == mock.attr1 assert 'returning 2' == mock.attr2() def 
test_flexmock_should_accept_shortcuts_for_creating_expectations(self): class Foo: def method1(self): pass def method2(self): pass foo = Foo() flexmock(foo, method1='returning 1', method2='returning 2') assert 'returning 1' == foo.method1() assert 'returning 2' == foo.method2() assert 'returning 2' == foo.method2() def test_flexmock_expectations_returns_all(self): mock = flexmock(name='temp') assert 0 == len(mock._flexmock_expectations) mock.should_receive('method_foo') mock.should_receive('method_bar') assert 2 == len(mock._flexmock_expectations) def test_flexmock_expectations_returns_named_expectation(self): mock = flexmock(name='temp') mock.should_receive('method_foo') assert 'method_foo' == mock._get_flexmock_expectation('method_foo').method def test_flexmock_expectations_returns_none_if_not_found(self): mock = flexmock(name='temp') assert mock._get_flexmock_expectation('method_foo') is None def test_flexmock_should_check_parameters(self): mock = flexmock(name='temp') mock.should_receive('method_foo').with_args('bar').and_return(1) mock.should_receive('method_foo').with_args('baz').and_return(2) assert 1 == mock.method_foo('bar') assert 2 == mock.method_foo('baz') def test_flexmock_should_keep_track_of_calls(self): mock = flexmock(name='temp') mock.should_receive('method_foo').with_args('foo').and_return(0) mock.should_receive('method_foo').with_args('bar').and_return(1) mock.should_receive('method_foo').with_args('baz').and_return(2) mock.method_foo('bar') mock.method_foo('bar') mock.method_foo('baz') expectation = mock._get_flexmock_expectation('method_foo', ('foo',)) assert 0 == expectation.times_called expectation = mock._get_flexmock_expectation('method_foo', ('bar',)) assert 2 == expectation.times_called expectation = mock._get_flexmock_expectation('method_foo', ('baz',)) assert 1 == expectation.times_called def test_flexmock_should_set_expectation_call_numbers(self): mock = flexmock(name='temp') mock.should_receive('method_foo').times(1) expectation = 
mock._get_flexmock_expectation('method_foo') assertRaises(MethodNotCalled, expectation.verify) mock.method_foo() expectation.verify() def test_flexmock_should_check_raised_exceptions(self): mock = flexmock(name='temp') class FakeException(Exception): pass mock.should_receive('method_foo').and_raise(FakeException) assertRaises(FakeException, mock.method_foo) assert 1 == mock._get_flexmock_expectation('method_foo').times_called def test_flexmock_should_check_raised_exceptions_instance_with_args(self): mock = flexmock(name='temp') class FakeException(Exception): def __init__(self, arg, arg2): pass mock.should_receive('method_foo').and_raise(FakeException(1, arg2=2)) assertRaises(FakeException, mock.method_foo) assert 1 == mock._get_flexmock_expectation('method_foo').times_called def test_flexmock_should_check_raised_exceptions_class_with_args(self): mock = flexmock(name='temp') class FakeException(Exception): def __init__(self, arg, arg2): pass mock.should_receive('method_foo').and_raise(FakeException, 1, arg2=2) assertRaises(FakeException, mock.method_foo) assert 1 == mock._get_flexmock_expectation('method_foo').times_called def test_flexmock_should_match_any_args_by_default(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('bar') mock.should_receive('method_foo').with_args('baz').and_return('baz') assert 'bar' == mock.method_foo() assert 'bar' == mock.method_foo(1) assert 'bar', mock.method_foo('foo' == 'bar') assert 'baz' == mock.method_foo('baz') def test_expectation_dot_mock_should_return_mock(self): mock = flexmock(name='temp') assert mock == mock.should_receive('method_foo').mock def test_flexmock_should_create_partial_new_style_object_mock(self): class User(object): def __init__(self, name=None): self.name = name def get_name(self): return self.name def set_name(self, name): self.name = name user = User() flexmock(user) user.should_receive('get_name').and_return('john') user.set_name('mike') assert 'john' == user.get_name() def 
test_flexmock_should_create_partial_old_style_object_mock(self): class User: def __init__(self, name=None): self.name = name def get_name(self): return self.name def set_name(self, name): self.name = name user = User() flexmock(user) user.should_receive('get_name').and_return('john') user.set_name('mike') assert 'john' == user.get_name() def test_flexmock_should_create_partial_new_style_class_mock(self): class User(object): def __init__(self): pass def get_name(self): pass flexmock(User) User.should_receive('get_name').and_return('mike') user = User() assert 'mike' == user.get_name() def test_flexmock_should_create_partial_old_style_class_mock(self): class User: def __init__(self): pass def get_name(self): pass flexmock(User) User.should_receive('get_name').and_return('mike') user = User() assert 'mike' == user.get_name() def test_flexmock_should_match_expectations_against_builtin_classes(self): mock = flexmock(name='temp') mock.should_receive('method_foo').with_args(str).and_return('got a string') mock.should_receive('method_foo').with_args(int).and_return('got an int') assert 'got a string' == mock.method_foo('string!') assert 'got an int' == mock.method_foo(23) assertRaises(InvalidMethodSignature, mock.method_foo, 2.0) def test_flexmock_should_match_expectations_against_user_defined_classes(self): mock = flexmock(name='temp') class Foo: pass mock.should_receive('method_foo').with_args(Foo).and_return('got a Foo') assert 'got a Foo' == mock.method_foo(Foo()) assertRaises(InvalidMethodSignature, mock.method_foo, 1) def test_flexmock_configures_global_mocks_dict(self): mock = flexmock(name='temp') for expectations in FlexmockContainer.flexmock_objects.values(): assert 0 == len(expectations) mock.should_receive('method_foo') for expectations in FlexmockContainer.flexmock_objects.values(): assert 1 == len(expectations) def test_flexmock_teardown_verifies_mocks(self): mock = flexmock(name='temp') mock.should_receive('verify_expectations').times(1) 
assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_teardown_does_not_verify_stubs(self): mock = flexmock(name='temp') mock.should_receive('verify_expectations') _tear_down(self) def test_flexmock_preserves_stubbed_object_methods_between_tests(self): class User: def get_name(self): return 'mike' user = User() flexmock(user).should_receive('get_name').and_return('john') assert 'john' == user.get_name() _tear_down(self) assert 'mike' == user.get_name() def test_flexmock_preserves_stubbed_class_methods_between_tests(self): class User: def get_name(self): return 'mike' user = User() flexmock(User).should_receive('get_name').and_return('john') assert 'john' == user.get_name() _tear_down(self) assert 'mike' == user.get_name() def test_flexmock_removes_new_stubs_from_objects_after_tests(self): class User: def get_name(self): pass user = User() saved = user.get_name flexmock(user).should_receive('get_name').and_return('john') assert saved != user.get_name assert 'john' == user.get_name() _tear_down(self) assert saved == user.get_name def test_flexmock_removes_new_stubs_from_classes_after_tests(self): class User: def get_name(self): pass user = User() saved = user.get_name flexmock(User).should_receive('get_name').and_return('john') assert saved != user.get_name assert 'john' == user.get_name() _tear_down(self) assert saved == user.get_name def test_flexmock_removes_stubs_from_multiple_objects_on_teardown(self): class User: def get_name(self): pass class Group: def get_name(self): pass user = User() group = User() saved1 = user.get_name saved2 = group.get_name flexmock(user).should_receive('get_name').and_return('john').once flexmock(group).should_receive('get_name').and_return('john').once assert saved1 != user.get_name assert saved2 != group.get_name assert 'john' == user.get_name() assert 'john' == group.get_name() _tear_down(self) assert saved1 == user.get_name assert saved2 == group.get_name def 
test_flexmock_removes_stubs_from_multiple_classes_on_teardown(self): class User: def get_name(self): pass class Group: def get_name(self): pass user = User() group = User() saved1 = user.get_name saved2 = group.get_name flexmock(User).should_receive('get_name').and_return('john') flexmock(Group).should_receive('get_name').and_return('john') assert saved1 != user.get_name assert saved2 != group.get_name assert 'john' == user.get_name() assert 'john' == group.get_name() _tear_down(self) assert saved1 == user.get_name assert saved2 == group.get_name def test_flexmock_respects_at_least_when_called_less_than_requested(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('bar').at_least.twice expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_LEAST == expectation.modifier mock.method_foo() assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_respects_at_least_when_called_requested_number(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').at_least.once expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_LEAST == expectation.modifier mock.method_foo() _tear_down(self) def test_flexmock_respects_at_least_when_called_more_than_requested(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').at_least.once expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_LEAST == expectation.modifier mock.method_foo() mock.method_foo() _tear_down(self) def test_flexmock_respects_at_most_when_called_less_than_requested(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('bar').at_most.twice expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_MOST == expectation.modifier mock.method_foo() _tear_down(self) def test_flexmock_respects_at_most_when_called_requested_number(self): mock = flexmock(name='temp') 
mock.should_receive('method_foo').and_return('value_bar').at_most.once expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_MOST == expectation.modifier mock.method_foo() _tear_down(self) def test_flexmock_respects_at_most_when_called_more_than_requested(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').at_most.once expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_MOST == expectation.modifier mock.method_foo() mock.method_foo() assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_treats_once_as_times_one(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').once expectation = mock._get_flexmock_expectation('method_foo') assert 1 == expectation.expected_calls assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_treats_twice_as_times_two(self): mock = flexmock(name='temp') mock.should_receive('method_foo').twice.and_return('value_bar') expectation = mock._get_flexmock_expectation('method_foo') assert 2 == expectation.expected_calls assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_works_with_never_when_true(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').never expectation = mock._get_flexmock_expectation('method_foo') assert 0 == expectation.expected_calls _tear_down(self) def test_flexmock_works_with_never_when_false(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').never mock.method_foo() assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_get_flexmock_expectation_should_work_with_args(self): mock = flexmock(name='temp') mock.should_receive('method_foo').with_args('value_bar') assert mock._get_flexmock_expectation('method_foo', 'value_bar') def test_flexmock_function_should_return_previously_mocked_object(self): class User(object): pass user = User() foo = flexmock(user) assert 
foo._mock == flexmock(user) def test_flexmock_should_not_return_class_object_if_mocking_instance(self): class User: def method(self): pass user = User() user2 = User() class_mock = flexmock(User).should_receive( 'method').and_return('class').mock user_mock = flexmock(user).should_receive( 'method').and_return('instance').mock assert class_mock is not user_mock assert 'instance' == user.method() assert 'class' == user2.method() def test_flexmock_should_blow_up_on_and_execute_for_class_mock(self): class User: def foo(self): return 'class' try: flexmock(User).should_receive('foo').and_execute raise Exception('and_execute should have raised an exception') except AndExecuteNotSupportedForClassMocks: pass def test_flexmock_should_mock_new_instances(self): class User(object): pass class Group(object): pass user = User() flexmock(Group, new_instances=user) assert user is Group() def test_flexmock_should_mock_new_instances_with_multiple_params(self): class User(object): pass class Group(object): def __init__(self, arg, arg2): pass user = User() flexmock(Group, new_instances=user) assert user is Group(1, 2) def test_flexmock_should_revert_new_instances_on_teardown(self): class User(object): pass class Group(object): pass user = User() group = Group() flexmock(Group, new_instances=user) assert user is Group() _tear_down(self) assert group.__class__ == Group().__class__ def test_flexmock_should_cleanup_added_methods_and_attributes(self): class Group(object): pass flexmock(Group) _tear_down(self) for method in FlexMock.UPDATED_ATTRS: assert method not in dir(Group) def test_flexmock_should_cleanup_after_exception(self): class User: def method2(self): pass class Group: def method1(self): pass flexmock(Group) flexmock(User) Group.should_receive('method1').once User.should_receive('method2').once assertRaises(MethodNotCalled, _tear_down, self) for method in FlexMock.UPDATED_ATTRS: assert method not in dir(Group) for method in FlexMock.UPDATED_ATTRS: assert method not in dir(User) 
def test_flexmock_and_execute_respects_matched_expectations(self): class Group(object): def method1(self, arg1, arg2='b'): return '%s:%s' % (arg1, arg2) def method2(self, arg): return arg group = Group() flexmock(group).should_receive('method1').twice.and_execute assert 'a:c' == group.method1('a', arg2='c') assert 'a:b' == group.method1('a') group.should_receive('method2').once.with_args('c').and_execute assert 'c' == group.method2('c') _tear_down(self) def test_flexmock_and_execute_respects_unmatched_expectations(self): class Group(object): def method1(self, arg1, arg2='b'): return '%s:%s' % (arg1, arg2) def method2(self): pass group = Group() flexmock(group).should_receive('method1').at_least.once.and_execute assertRaises(MethodNotCalled, _tear_down, self) flexmock(group) group.should_receive('method2').with_args('a').once.and_execute group.should_receive('method2').with_args('not a') group.method2('not a') assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_doesnt_error_on_properly_ordered_expectations(self): class Foo(object): def foo(self): pass def method1(self): pass def bar(self): pass def baz(self): pass flexmock(Foo).should_receive('foo') flexmock(Foo).should_receive('method1').with_args('a').ordered flexmock(Foo).should_receive('bar') flexmock(Foo).should_receive('method1').with_args('b').ordered flexmock(Foo).should_receive('baz') Foo.bar() Foo.method1('a') Foo.method1('b') Foo.baz() Foo.foo() def test_flexmock_errors_on_improperly_ordered_expectations(self): class Foo(object): def foo(self): pass def method1(self): pass def bar(self): pass def baz(self): pass flexmock(Foo) Foo.should_receive('foo') Foo.should_receive('method1').with_args('a').ordered Foo.should_receive('bar') Foo.should_receive('method1').with_args('b').ordered Foo.should_receive('baz') Foo.bar() Foo.bar() Foo.foo() assertRaises(MethodCalledOutOfOrder, Foo.method1, 'b') def test_flexmock_should_accept_multiple_return_values(self): class Foo: def method1(self): pass foo = 
Foo() flexmock(foo).should_receive('method1').and_return(1, 5).and_return(2) assert (1, 5) == foo.method1() assert 2 == foo.method1() assert (1, 5) == foo.method1() assert 2 == foo.method1() def test_flexmock_should_accept_multiple_return_values_with_shortcut(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').and_return(1, 2).one_by_one assert 1 == foo.method1() assert 2 == foo.method1() assert 1 == foo.method1() assert 2 == foo.method1() def test_flexmock_should_mix_multiple_return_values_with_exceptions(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').and_return(1).and_raise(Exception) assert 1 == foo.method1() assertRaises(Exception, foo.method1) assert 1 == foo.method1() assertRaises(Exception, foo.method1) def test_flexmock_should_match_types_on_multiple_arguments(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').with_args(str, int).and_return('ok') assert 'ok', foo.method1('some string' == 12) assertRaises(InvalidMethodSignature, foo.method1, 12, 32) assertRaises(InvalidMethodSignature, foo.method1, 12, 'some string') assertRaises(InvalidMethodSignature, foo.method1, 'string', 12, 14) def test_flexmock_should_match_types_on_multiple_arguments_generic(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').with_args( object, object, object).and_return('ok') assert 'ok', foo.method1('some string', None == 12) assert 'ok', foo.method1((1,), None == 12) assert 'ok', foo.method1(12, 14 == []) assert 'ok', foo.method1('some string', 'another one' == False) assertRaises(InvalidMethodSignature, foo.method1, 'string', 12) assertRaises(InvalidMethodSignature, foo.method1, 'string', 12, 13, 14) def test_flexmock_should_match_types_on_multiple_arguments_classes(self): class Foo: def method1(self): pass class Bar: pass foo = Foo() bar = Bar() flexmock(foo).should_receive('method1').with_args( object, 
Bar).and_return('ok') assert 'ok', foo.method1('some string' == bar) assertRaises(InvalidMethodSignature, foo.method1, bar, 'some string') assertRaises(InvalidMethodSignature, foo.method1, 12, 'some string') def test_flexmock_should_match_keyword_arguments(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').with_args(1, arg3=3, arg2=2).twice foo.method1(1, arg2=2, arg3=3) foo.method1(1, arg3=3, arg2=2) _tear_down(self) flexmock(foo).should_receive('method1').with_args(1, arg3=3, arg2=2) assertRaises(InvalidMethodSignature, foo.method1, arg2=2, arg3=3) assertRaises(InvalidMethodSignature, foo.method1, 1, arg2=2, arg3=4) assertRaises(InvalidMethodSignature, foo.method1, 1) def test_flexmock_should_match_keyword_arguments_works_with_and_execute(self): class Foo: def method1(self, arg1, arg2=None, arg3=None): return '%s%s%s' % (arg1, arg2, arg3) foo = Foo() flexmock(foo).should_receive('method1').with_args( 1, arg3=3, arg2=2).and_execute.once assert '123' == foo.method1(1, arg2=2, arg3=3) def test_flexmock_should_mock_private_methods(self): class Foo: def __private_method(self): return 'foo' def public_method(self): return self.__private_method() foo = Foo() flexmock(foo).should_receive('__private_method').and_return('bar') assert 'bar' == foo.public_method() def test_flexmock_should_mock_private_class_methods(self): class Foo: pass flexmock(Foo).should_receive('__iter__').and_yield(1, 2, 3) assert [1, 2, 3] == [x for x in Foo()] def test_flexmock_should_mock_generators(self): class Gen: def foo(self): pass gen = Gen() flexmock(gen).should_receive('foo').and_yield(*range(1, 10)) output = [val for val in gen.foo()] assert [val for val in range(1, 10)] == output def test_flexmock_should_verify_correct_spy_return_values(self): class User: def get_stuff(self): return 'real', 'stuff' user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_return('real', 'stuff') assert ('real', 'stuff') == user.get_stuff() def 
test_flexmock_should_verify_spy_raises_correct_exception_class(self): class FakeException(Exception): def __init__(self, param, param2): self.message = '%s, %s' % (param, param2) Exception.__init__(self) class User: def get_stuff(self): raise FakeException(1, 2) user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_raise(FakeException, 1, 2) user.get_stuff() def test_flexmock_should_verify_spy_matches_exception_message(self): class FakeException(Exception): def __init__(self, param, param2): self.p1 = param self.p2 = param2 Exception.__init__(self, param) def __str__(self): return '%s, %s' % (self.p1, self.p2) class User: def get_stuff(self): raise FakeException(1, 2) user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_raise(FakeException, 2, 1) assertRaises(InvalidExceptionMessage, user.get_stuff) def test_flexmock_should_blow_up_on_wrong_exception_type(self): class User: def get_stuff(self): raise AlreadyMocked('foo') user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_raise(MethodNotCalled) assertRaises(InvalidExceptionClass, user.get_stuff) def test_flexmock_should_blow_up_on_wrong_spy_return_values(self): class User: def get_stuff(self): return 'real', 'stuff' def get_more_stuff(self): return 'other', 'stuff' user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_return('other', 'stuff') assertRaises(InvalidMethodSignature, user.get_stuff) flexmock(user).should_receive( 'get_more_stuff').and_execute.and_return() assertRaises(InvalidMethodSignature, user.get_more_stuff) def test_flexmock_should_mock_same_class_twice(self): class Foo: pass flexmock(Foo) flexmock(Foo) def test_flexmock_and_execute_should_not_clobber_original_method(self): class User: def get_stuff(self): return 'real', 'stuff' user = User() flexmock(user).should_receive('get_stuff').and_execute flexmock(user).should_receive('get_stuff').and_execute assert ('real', 'stuff') == user.get_stuff() def 
test_flexmock_should_properly_restore_static_methods(self): class User: @staticmethod def get_stuff(): return 'ok!' assert 'ok!' == User.get_stuff() flexmock(User).should_receive('get_stuff') assert User.get_stuff() is None _tear_down(self) assert 'ok!' == User.get_stuff() def test_flexmock_should_properly_restore_undecorated_static_methods(self): class User: def get_stuff(): return 'ok!' get_stuff = staticmethod(get_stuff) assert 'ok!' == User.get_stuff() flexmock(User).should_receive('get_stuff') assert User.get_stuff() is None _tear_down(self) assert 'ok!' == User.get_stuff() def test_flexmock_should_properly_restore_module_level_functions(self): if 'flexmock_test' in sys.modules: mod = sys.modules['flexmock_test'] else: mod = sys.modules['__main__'] flexmock(mod).should_receive('module_level_function') assert None == module_level_function(1, 2) _tear_down(self) assert '1, 2' == module_level_function(1, 2) def test_flexmock_should_properly_restore_class_methods(self): class User: @classmethod def get_stuff(cls): return cls.__name__ assert 'User' == User.get_stuff() flexmock(User).should_receive('get_stuff').and_return('foo') assert 'foo' == User.get_stuff() _tear_down(self) assert 'User' == User.get_stuff() def test_and_execute_should_match_return_value_class(self): class User: pass user = User() foo = flexmock(foo=lambda: ('bar', 'baz'), bar=lambda: user, baz=lambda: None, bax=lambda: None) foo.should_receive('foo').and_execute.and_return(str, str) foo.should_receive('bar').and_execute.and_return(User) foo.should_receive('baz').and_execute.and_return(object) foo.should_receive('bax').and_execute.and_return(None) assert ('bar', 'baz') == foo.foo() assert user == foo.bar() assert None == foo.baz() assert None == foo.bax() def test_new_instances_should_blow_up_on_should_receive(self): class User(object): pass mock = flexmock(User, new_instances=None) assertRaises(FlexmockException, mock.should_receive, 'foo') def 
test_should_call_alias_should_receive_and_execute(self): class Foo: def get_stuff(self): return 'yay' foo = Foo() flexmock(foo).should_call('get_stuff').and_return('yay').once assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_should_fail_mocking_nonexistent_methods(self): class User: pass user = User() assertRaises(MethodDoesNotExist, flexmock(user).should_receive, 'nonexistent') def test_flexmock_should_not_explode_on_unicode_formatting(self): if sys.version_info >= (3, 0): formatted = _format_args( 'method', {'kargs' : (chr(0x86C7),), 'kwargs' : {}}) assert formatted == 'method("蛇")' else: formatted = _format_args( 'method', {'kargs' : (unichr(0x86C7),), 'kwargs' : {}}) assert formatted == 'method("%s")' % unichr(0x86C7) def test_return_value_should_not_explode_on_unicode_values(self): class Foo: def method(self): pass if sys.version_info >= (3, 0): return_value = ReturnValue(chr(0x86C7)) assert '%s' % return_value == '蛇' else: return_value = ReturnValue(unichr(0x86C7)) assert unicode(return_value) == unichr(0x86C7) def test_pass_thru_should_call_original_method_only_once(self): class Nyan(object): def __init__(self): self.n = 0 def method(self): self.n += 1 obj = Nyan() flexmock(obj) obj.should_call('method') obj.method() self.assertEqual(obj.n, 1) def test_should_call_works_for_same_method_with_different_args(self): class Foo: def method(self, arg): pass foo = Foo() flexmock(foo).should_call('method').with_args('foo').once flexmock(foo).should_call('method').with_args('bar').once foo.method('foo') foo.method('bar') _tear_down(self) def test_should_call_fails_properly_for_same_method_with_different_args(self): class Foo: def method(self, arg): pass foo = Foo() flexmock(foo).should_call('method').with_args('foo').once flexmock(foo).should_call('method').with_args('bar').once foo.method('foo') assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_should_give_reasonable_error_for_builtins(self): assertRaises(AttemptingToMockBuiltin, 
flexmock, object) if __name__ == '__main__': unittest.main()
36.201163
81
0.710886
from flexmock import FlexMock from flexmock import AlreadyMocked from flexmock import AndExecuteNotSupportedForClassMocks from flexmock import AttemptingToMockBuiltin from flexmock import Expectation from flexmock import FlexmockContainer from flexmock import FlexmockException from flexmock import InvalidMethodSignature from flexmock import InvalidExceptionClass from flexmock import InvalidExceptionMessage from flexmock import MethodDoesNotExist from flexmock import MethodNotCalled from flexmock import MethodCalledOutOfOrder from flexmock import ReturnValue from flexmock import flexmock from flexmock import flexmock_nose from flexmock import _format_args import sys import unittest def module_level_function(some, args): return "%s, %s" % (some, args) def _tear_down(runner): return unittest.TestCase.tearDown(runner) def assertRaises(exception, method, *kargs, **kwargs): try: method(*kargs, **kwargs) except exception: assert True return except: pass raise Exception('%s not raised' % exception.__name__) class TestFlexmock(unittest.TestCase): def test_flexmock_should_create_mock_object(self): mock = flexmock() assert isinstance(mock, FlexMock) def test_flexmock_should_create_mock_object_from_dict(self): mock = flexmock(foo='foo', bar='bar') assert 'foo' == mock.foo assert 'bar' == mock.bar def test_flexmock_should_add_expectations(self): mock = flexmock(name='temp') mock.should_receive('method_foo') assert 'method_foo' in [x.method for x in mock._flexmock_expectations] def test_flexmock_should_return_value(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar') mock.should_receive('method_bar').and_return('value_baz') assert 'value_bar' == mock.method_foo() assert 'value_baz' == mock.method_bar() def test_flexmock_should_accept_shortcuts_for_creating_mock_object(self): mock = flexmock(attr1='value 1', attr2=lambda: 'returning 2') assert 'value 1' == mock.attr1 assert 'returning 2' == mock.attr2() def 
test_flexmock_should_accept_shortcuts_for_creating_expectations(self): class Foo: def method1(self): pass def method2(self): pass foo = Foo() flexmock(foo, method1='returning 1', method2='returning 2') assert 'returning 1' == foo.method1() assert 'returning 2' == foo.method2() assert 'returning 2' == foo.method2() def test_flexmock_expectations_returns_all(self): mock = flexmock(name='temp') assert 0 == len(mock._flexmock_expectations) mock.should_receive('method_foo') mock.should_receive('method_bar') assert 2 == len(mock._flexmock_expectations) def test_flexmock_expectations_returns_named_expectation(self): mock = flexmock(name='temp') mock.should_receive('method_foo') assert 'method_foo' == mock._get_flexmock_expectation('method_foo').method def test_flexmock_expectations_returns_none_if_not_found(self): mock = flexmock(name='temp') assert mock._get_flexmock_expectation('method_foo') is None def test_flexmock_should_check_parameters(self): mock = flexmock(name='temp') mock.should_receive('method_foo').with_args('bar').and_return(1) mock.should_receive('method_foo').with_args('baz').and_return(2) assert 1 == mock.method_foo('bar') assert 2 == mock.method_foo('baz') def test_flexmock_should_keep_track_of_calls(self): mock = flexmock(name='temp') mock.should_receive('method_foo').with_args('foo').and_return(0) mock.should_receive('method_foo').with_args('bar').and_return(1) mock.should_receive('method_foo').with_args('baz').and_return(2) mock.method_foo('bar') mock.method_foo('bar') mock.method_foo('baz') expectation = mock._get_flexmock_expectation('method_foo', ('foo',)) assert 0 == expectation.times_called expectation = mock._get_flexmock_expectation('method_foo', ('bar',)) assert 2 == expectation.times_called expectation = mock._get_flexmock_expectation('method_foo', ('baz',)) assert 1 == expectation.times_called def test_flexmock_should_set_expectation_call_numbers(self): mock = flexmock(name='temp') mock.should_receive('method_foo').times(1) expectation = 
mock._get_flexmock_expectation('method_foo') assertRaises(MethodNotCalled, expectation.verify) mock.method_foo() expectation.verify() def test_flexmock_should_check_raised_exceptions(self): mock = flexmock(name='temp') class FakeException(Exception): pass mock.should_receive('method_foo').and_raise(FakeException) assertRaises(FakeException, mock.method_foo) assert 1 == mock._get_flexmock_expectation('method_foo').times_called def test_flexmock_should_check_raised_exceptions_instance_with_args(self): mock = flexmock(name='temp') class FakeException(Exception): def __init__(self, arg, arg2): pass mock.should_receive('method_foo').and_raise(FakeException(1, arg2=2)) assertRaises(FakeException, mock.method_foo) assert 1 == mock._get_flexmock_expectation('method_foo').times_called def test_flexmock_should_check_raised_exceptions_class_with_args(self): mock = flexmock(name='temp') class FakeException(Exception): def __init__(self, arg, arg2): pass mock.should_receive('method_foo').and_raise(FakeException, 1, arg2=2) assertRaises(FakeException, mock.method_foo) assert 1 == mock._get_flexmock_expectation('method_foo').times_called def test_flexmock_should_match_any_args_by_default(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('bar') mock.should_receive('method_foo').with_args('baz').and_return('baz') assert 'bar' == mock.method_foo() assert 'bar' == mock.method_foo(1) assert 'bar', mock.method_foo('foo' == 'bar') assert 'baz' == mock.method_foo('baz') def test_expectation_dot_mock_should_return_mock(self): mock = flexmock(name='temp') assert mock == mock.should_receive('method_foo').mock def test_flexmock_should_create_partial_new_style_object_mock(self): class User(object): def __init__(self, name=None): self.name = name def get_name(self): return self.name def set_name(self, name): self.name = name user = User() flexmock(user) user.should_receive('get_name').and_return('john') user.set_name('mike') assert 'john' == user.get_name() def 
test_flexmock_should_create_partial_old_style_object_mock(self): class User: def __init__(self, name=None): self.name = name def get_name(self): return self.name def set_name(self, name): self.name = name user = User() flexmock(user) user.should_receive('get_name').and_return('john') user.set_name('mike') assert 'john' == user.get_name() def test_flexmock_should_create_partial_new_style_class_mock(self): class User(object): def __init__(self): pass def get_name(self): pass flexmock(User) User.should_receive('get_name').and_return('mike') user = User() assert 'mike' == user.get_name() def test_flexmock_should_create_partial_old_style_class_mock(self): class User: def __init__(self): pass def get_name(self): pass flexmock(User) User.should_receive('get_name').and_return('mike') user = User() assert 'mike' == user.get_name() def test_flexmock_should_match_expectations_against_builtin_classes(self): mock = flexmock(name='temp') mock.should_receive('method_foo').with_args(str).and_return('got a string') mock.should_receive('method_foo').with_args(int).and_return('got an int') assert 'got a string' == mock.method_foo('string!') assert 'got an int' == mock.method_foo(23) assertRaises(InvalidMethodSignature, mock.method_foo, 2.0) def test_flexmock_should_match_expectations_against_user_defined_classes(self): mock = flexmock(name='temp') class Foo: pass mock.should_receive('method_foo').with_args(Foo).and_return('got a Foo') assert 'got a Foo' == mock.method_foo(Foo()) assertRaises(InvalidMethodSignature, mock.method_foo, 1) def test_flexmock_configures_global_mocks_dict(self): mock = flexmock(name='temp') for expectations in FlexmockContainer.flexmock_objects.values(): assert 0 == len(expectations) mock.should_receive('method_foo') for expectations in FlexmockContainer.flexmock_objects.values(): assert 1 == len(expectations) def test_flexmock_teardown_verifies_mocks(self): mock = flexmock(name='temp') mock.should_receive('verify_expectations').times(1) 
assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_teardown_does_not_verify_stubs(self): mock = flexmock(name='temp') mock.should_receive('verify_expectations') _tear_down(self) def test_flexmock_preserves_stubbed_object_methods_between_tests(self): class User: def get_name(self): return 'mike' user = User() flexmock(user).should_receive('get_name').and_return('john') assert 'john' == user.get_name() _tear_down(self) assert 'mike' == user.get_name() def test_flexmock_preserves_stubbed_class_methods_between_tests(self): class User: def get_name(self): return 'mike' user = User() flexmock(User).should_receive('get_name').and_return('john') assert 'john' == user.get_name() _tear_down(self) assert 'mike' == user.get_name() def test_flexmock_removes_new_stubs_from_objects_after_tests(self): class User: def get_name(self): pass user = User() saved = user.get_name flexmock(user).should_receive('get_name').and_return('john') assert saved != user.get_name assert 'john' == user.get_name() _tear_down(self) assert saved == user.get_name def test_flexmock_removes_new_stubs_from_classes_after_tests(self): class User: def get_name(self): pass user = User() saved = user.get_name flexmock(User).should_receive('get_name').and_return('john') assert saved != user.get_name assert 'john' == user.get_name() _tear_down(self) assert saved == user.get_name def test_flexmock_removes_stubs_from_multiple_objects_on_teardown(self): class User: def get_name(self): pass class Group: def get_name(self): pass user = User() group = User() saved1 = user.get_name saved2 = group.get_name flexmock(user).should_receive('get_name').and_return('john').once flexmock(group).should_receive('get_name').and_return('john').once assert saved1 != user.get_name assert saved2 != group.get_name assert 'john' == user.get_name() assert 'john' == group.get_name() _tear_down(self) assert saved1 == user.get_name assert saved2 == group.get_name def 
test_flexmock_removes_stubs_from_multiple_classes_on_teardown(self): class User: def get_name(self): pass class Group: def get_name(self): pass user = User() group = User() saved1 = user.get_name saved2 = group.get_name flexmock(User).should_receive('get_name').and_return('john') flexmock(Group).should_receive('get_name').and_return('john') assert saved1 != user.get_name assert saved2 != group.get_name assert 'john' == user.get_name() assert 'john' == group.get_name() _tear_down(self) assert saved1 == user.get_name assert saved2 == group.get_name def test_flexmock_respects_at_least_when_called_less_than_requested(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('bar').at_least.twice expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_LEAST == expectation.modifier mock.method_foo() assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_respects_at_least_when_called_requested_number(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').at_least.once expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_LEAST == expectation.modifier mock.method_foo() _tear_down(self) def test_flexmock_respects_at_least_when_called_more_than_requested(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').at_least.once expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_LEAST == expectation.modifier mock.method_foo() mock.method_foo() _tear_down(self) def test_flexmock_respects_at_most_when_called_less_than_requested(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('bar').at_most.twice expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_MOST == expectation.modifier mock.method_foo() _tear_down(self) def test_flexmock_respects_at_most_when_called_requested_number(self): mock = flexmock(name='temp') 
mock.should_receive('method_foo').and_return('value_bar').at_most.once expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_MOST == expectation.modifier mock.method_foo() _tear_down(self) def test_flexmock_respects_at_most_when_called_more_than_requested(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').at_most.once expectation = mock._get_flexmock_expectation('method_foo') assert Expectation.AT_MOST == expectation.modifier mock.method_foo() mock.method_foo() assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_treats_once_as_times_one(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').once expectation = mock._get_flexmock_expectation('method_foo') assert 1 == expectation.expected_calls assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_treats_twice_as_times_two(self): mock = flexmock(name='temp') mock.should_receive('method_foo').twice.and_return('value_bar') expectation = mock._get_flexmock_expectation('method_foo') assert 2 == expectation.expected_calls assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_works_with_never_when_true(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').never expectation = mock._get_flexmock_expectation('method_foo') assert 0 == expectation.expected_calls _tear_down(self) def test_flexmock_works_with_never_when_false(self): mock = flexmock(name='temp') mock.should_receive('method_foo').and_return('value_bar').never mock.method_foo() assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_get_flexmock_expectation_should_work_with_args(self): mock = flexmock(name='temp') mock.should_receive('method_foo').with_args('value_bar') assert mock._get_flexmock_expectation('method_foo', 'value_bar') def test_flexmock_function_should_return_previously_mocked_object(self): class User(object): pass user = User() foo = flexmock(user) assert 
foo._mock == flexmock(user) def test_flexmock_should_not_return_class_object_if_mocking_instance(self): class User: def method(self): pass user = User() user2 = User() class_mock = flexmock(User).should_receive( 'method').and_return('class').mock user_mock = flexmock(user).should_receive( 'method').and_return('instance').mock assert class_mock is not user_mock assert 'instance' == user.method() assert 'class' == user2.method() def test_flexmock_should_blow_up_on_and_execute_for_class_mock(self): class User: def foo(self): return 'class' try: flexmock(User).should_receive('foo').and_execute raise Exception('and_execute should have raised an exception') except AndExecuteNotSupportedForClassMocks: pass def test_flexmock_should_mock_new_instances(self): class User(object): pass class Group(object): pass user = User() flexmock(Group, new_instances=user) assert user is Group() def test_flexmock_should_mock_new_instances_with_multiple_params(self): class User(object): pass class Group(object): def __init__(self, arg, arg2): pass user = User() flexmock(Group, new_instances=user) assert user is Group(1, 2) def test_flexmock_should_revert_new_instances_on_teardown(self): class User(object): pass class Group(object): pass user = User() group = Group() flexmock(Group, new_instances=user) assert user is Group() _tear_down(self) assert group.__class__ == Group().__class__ def test_flexmock_should_cleanup_added_methods_and_attributes(self): class Group(object): pass flexmock(Group) _tear_down(self) for method in FlexMock.UPDATED_ATTRS: assert method not in dir(Group) def test_flexmock_should_cleanup_after_exception(self): class User: def method2(self): pass class Group: def method1(self): pass flexmock(Group) flexmock(User) Group.should_receive('method1').once User.should_receive('method2').once assertRaises(MethodNotCalled, _tear_down, self) for method in FlexMock.UPDATED_ATTRS: assert method not in dir(Group) for method in FlexMock.UPDATED_ATTRS: assert method not in dir(User) 
def test_flexmock_and_execute_respects_matched_expectations(self): class Group(object): def method1(self, arg1, arg2='b'): return '%s:%s' % (arg1, arg2) def method2(self, arg): return arg group = Group() flexmock(group).should_receive('method1').twice.and_execute assert 'a:c' == group.method1('a', arg2='c') assert 'a:b' == group.method1('a') group.should_receive('method2').once.with_args('c').and_execute assert 'c' == group.method2('c') _tear_down(self) def test_flexmock_and_execute_respects_unmatched_expectations(self): class Group(object): def method1(self, arg1, arg2='b'): return '%s:%s' % (arg1, arg2) def method2(self): pass group = Group() flexmock(group).should_receive('method1').at_least.once.and_execute assertRaises(MethodNotCalled, _tear_down, self) flexmock(group) group.should_receive('method2').with_args('a').once.and_execute group.should_receive('method2').with_args('not a') group.method2('not a') assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_doesnt_error_on_properly_ordered_expectations(self): class Foo(object): def foo(self): pass def method1(self): pass def bar(self): pass def baz(self): pass flexmock(Foo).should_receive('foo') flexmock(Foo).should_receive('method1').with_args('a').ordered flexmock(Foo).should_receive('bar') flexmock(Foo).should_receive('method1').with_args('b').ordered flexmock(Foo).should_receive('baz') Foo.bar() Foo.method1('a') Foo.method1('b') Foo.baz() Foo.foo() def test_flexmock_errors_on_improperly_ordered_expectations(self): class Foo(object): def foo(self): pass def method1(self): pass def bar(self): pass def baz(self): pass flexmock(Foo) Foo.should_receive('foo') Foo.should_receive('method1').with_args('a').ordered Foo.should_receive('bar') Foo.should_receive('method1').with_args('b').ordered Foo.should_receive('baz') Foo.bar() Foo.bar() Foo.foo() assertRaises(MethodCalledOutOfOrder, Foo.method1, 'b') def test_flexmock_should_accept_multiple_return_values(self): class Foo: def method1(self): pass foo = 
Foo() flexmock(foo).should_receive('method1').and_return(1, 5).and_return(2) assert (1, 5) == foo.method1() assert 2 == foo.method1() assert (1, 5) == foo.method1() assert 2 == foo.method1() def test_flexmock_should_accept_multiple_return_values_with_shortcut(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').and_return(1, 2).one_by_one assert 1 == foo.method1() assert 2 == foo.method1() assert 1 == foo.method1() assert 2 == foo.method1() def test_flexmock_should_mix_multiple_return_values_with_exceptions(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').and_return(1).and_raise(Exception) assert 1 == foo.method1() assertRaises(Exception, foo.method1) assert 1 == foo.method1() assertRaises(Exception, foo.method1) def test_flexmock_should_match_types_on_multiple_arguments(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').with_args(str, int).and_return('ok') assert 'ok', foo.method1('some string' == 12) assertRaises(InvalidMethodSignature, foo.method1, 12, 32) assertRaises(InvalidMethodSignature, foo.method1, 12, 'some string') assertRaises(InvalidMethodSignature, foo.method1, 'string', 12, 14) def test_flexmock_should_match_types_on_multiple_arguments_generic(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').with_args( object, object, object).and_return('ok') assert 'ok', foo.method1('some string', None == 12) assert 'ok', foo.method1((1,), None == 12) assert 'ok', foo.method1(12, 14 == []) assert 'ok', foo.method1('some string', 'another one' == False) assertRaises(InvalidMethodSignature, foo.method1, 'string', 12) assertRaises(InvalidMethodSignature, foo.method1, 'string', 12, 13, 14) def test_flexmock_should_match_types_on_multiple_arguments_classes(self): class Foo: def method1(self): pass class Bar: pass foo = Foo() bar = Bar() flexmock(foo).should_receive('method1').with_args( object, 
Bar).and_return('ok') assert 'ok', foo.method1('some string' == bar) assertRaises(InvalidMethodSignature, foo.method1, bar, 'some string') assertRaises(InvalidMethodSignature, foo.method1, 12, 'some string') def test_flexmock_should_match_keyword_arguments(self): class Foo: def method1(self): pass foo = Foo() flexmock(foo).should_receive('method1').with_args(1, arg3=3, arg2=2).twice foo.method1(1, arg2=2, arg3=3) foo.method1(1, arg3=3, arg2=2) _tear_down(self) flexmock(foo).should_receive('method1').with_args(1, arg3=3, arg2=2) assertRaises(InvalidMethodSignature, foo.method1, arg2=2, arg3=3) assertRaises(InvalidMethodSignature, foo.method1, 1, arg2=2, arg3=4) assertRaises(InvalidMethodSignature, foo.method1, 1) def test_flexmock_should_match_keyword_arguments_works_with_and_execute(self): class Foo: def method1(self, arg1, arg2=None, arg3=None): return '%s%s%s' % (arg1, arg2, arg3) foo = Foo() flexmock(foo).should_receive('method1').with_args( 1, arg3=3, arg2=2).and_execute.once assert '123' == foo.method1(1, arg2=2, arg3=3) def test_flexmock_should_mock_private_methods(self): class Foo: def __private_method(self): return 'foo' def public_method(self): return self.__private_method() foo = Foo() flexmock(foo).should_receive('__private_method').and_return('bar') assert 'bar' == foo.public_method() def test_flexmock_should_mock_private_class_methods(self): class Foo: pass flexmock(Foo).should_receive('__iter__').and_yield(1, 2, 3) assert [1, 2, 3] == [x for x in Foo()] def test_flexmock_should_mock_generators(self): class Gen: def foo(self): pass gen = Gen() flexmock(gen).should_receive('foo').and_yield(*range(1, 10)) output = [val for val in gen.foo()] assert [val for val in range(1, 10)] == output def test_flexmock_should_verify_correct_spy_return_values(self): class User: def get_stuff(self): return 'real', 'stuff' user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_return('real', 'stuff') assert ('real', 'stuff') == user.get_stuff() def 
test_flexmock_should_verify_spy_raises_correct_exception_class(self): class FakeException(Exception): def __init__(self, param, param2): self.message = '%s, %s' % (param, param2) Exception.__init__(self) class User: def get_stuff(self): raise FakeException(1, 2) user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_raise(FakeException, 1, 2) user.get_stuff() def test_flexmock_should_verify_spy_matches_exception_message(self): class FakeException(Exception): def __init__(self, param, param2): self.p1 = param self.p2 = param2 Exception.__init__(self, param) def __str__(self): return '%s, %s' % (self.p1, self.p2) class User: def get_stuff(self): raise FakeException(1, 2) user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_raise(FakeException, 2, 1) assertRaises(InvalidExceptionMessage, user.get_stuff) def test_flexmock_should_blow_up_on_wrong_exception_type(self): class User: def get_stuff(self): raise AlreadyMocked('foo') user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_raise(MethodNotCalled) assertRaises(InvalidExceptionClass, user.get_stuff) def test_flexmock_should_blow_up_on_wrong_spy_return_values(self): class User: def get_stuff(self): return 'real', 'stuff' def get_more_stuff(self): return 'other', 'stuff' user = User() flexmock(user).should_receive( 'get_stuff').and_execute.and_return('other', 'stuff') assertRaises(InvalidMethodSignature, user.get_stuff) flexmock(user).should_receive( 'get_more_stuff').and_execute.and_return() assertRaises(InvalidMethodSignature, user.get_more_stuff) def test_flexmock_should_mock_same_class_twice(self): class Foo: pass flexmock(Foo) flexmock(Foo) def test_flexmock_and_execute_should_not_clobber_original_method(self): class User: def get_stuff(self): return 'real', 'stuff' user = User() flexmock(user).should_receive('get_stuff').and_execute flexmock(user).should_receive('get_stuff').and_execute assert ('real', 'stuff') == user.get_stuff() def 
test_flexmock_should_properly_restore_static_methods(self): class User: @staticmethod def get_stuff(): return 'ok!' assert 'ok!' == User.get_stuff() flexmock(User).should_receive('get_stuff') assert User.get_stuff() is None _tear_down(self) assert 'ok!' == User.get_stuff() def test_flexmock_should_properly_restore_undecorated_static_methods(self): class User: def get_stuff(): return 'ok!' get_stuff = staticmethod(get_stuff) assert 'ok!' == User.get_stuff() flexmock(User).should_receive('get_stuff') assert User.get_stuff() is None _tear_down(self) assert 'ok!' == User.get_stuff() def test_flexmock_should_properly_restore_module_level_functions(self): if 'flexmock_test' in sys.modules: mod = sys.modules['flexmock_test'] else: mod = sys.modules['__main__'] flexmock(mod).should_receive('module_level_function') assert None == module_level_function(1, 2) _tear_down(self) assert '1, 2' == module_level_function(1, 2) def test_flexmock_should_properly_restore_class_methods(self): class User: @classmethod def get_stuff(cls): return cls.__name__ assert 'User' == User.get_stuff() flexmock(User).should_receive('get_stuff').and_return('foo') assert 'foo' == User.get_stuff() _tear_down(self) assert 'User' == User.get_stuff() def test_and_execute_should_match_return_value_class(self): class User: pass user = User() foo = flexmock(foo=lambda: ('bar', 'baz'), bar=lambda: user, baz=lambda: None, bax=lambda: None) foo.should_receive('foo').and_execute.and_return(str, str) foo.should_receive('bar').and_execute.and_return(User) foo.should_receive('baz').and_execute.and_return(object) foo.should_receive('bax').and_execute.and_return(None) assert ('bar', 'baz') == foo.foo() assert user == foo.bar() assert None == foo.baz() assert None == foo.bax() def test_new_instances_should_blow_up_on_should_receive(self): class User(object): pass mock = flexmock(User, new_instances=None) assertRaises(FlexmockException, mock.should_receive, 'foo') def 
test_should_call_alias_should_receive_and_execute(self): class Foo: def get_stuff(self): return 'yay' foo = Foo() flexmock(foo).should_call('get_stuff').and_return('yay').once assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_should_fail_mocking_nonexistent_methods(self): class User: pass user = User() assertRaises(MethodDoesNotExist, flexmock(user).should_receive, 'nonexistent') def test_flexmock_should_not_explode_on_unicode_formatting(self): if sys.version_info >= (3, 0): formatted = _format_args( 'method', {'kargs' : (chr(0x86C7),), 'kwargs' : {}}) assert formatted == 'method("蛇")' else: formatted = _format_args( 'method', {'kargs' : (unichr(0x86C7),), 'kwargs' : {}}) assert formatted == 'method("%s")' % unichr(0x86C7) def test_return_value_should_not_explode_on_unicode_values(self): class Foo: def method(self): pass if sys.version_info >= (3, 0): return_value = ReturnValue(chr(0x86C7)) assert '%s' % return_value == '蛇' else: return_value = ReturnValue(unichr(0x86C7)) assert unicode(return_value) == unichr(0x86C7) def test_pass_thru_should_call_original_method_only_once(self): class Nyan(object): def __init__(self): self.n = 0 def method(self): self.n += 1 obj = Nyan() flexmock(obj) obj.should_call('method') obj.method() self.assertEqual(obj.n, 1) def test_should_call_works_for_same_method_with_different_args(self): class Foo: def method(self, arg): pass foo = Foo() flexmock(foo).should_call('method').with_args('foo').once flexmock(foo).should_call('method').with_args('bar').once foo.method('foo') foo.method('bar') _tear_down(self) def test_should_call_fails_properly_for_same_method_with_different_args(self): class Foo: def method(self, arg): pass foo = Foo() flexmock(foo).should_call('method').with_args('foo').once flexmock(foo).should_call('method').with_args('bar').once foo.method('foo') assertRaises(MethodNotCalled, _tear_down, self) def test_flexmock_should_give_reasonable_error_for_builtins(self): assertRaises(AttemptingToMockBuiltin, 
flexmock, object) if __name__ == '__main__': unittest.main()
true
true
1c45ccf6c4e027fc171552bcb089538da702ede9
3,773
py
Python
dataviz/timeline_gibraltar.py
Udzu/pudzu
5a0302830b052fc54feba891eb7bf634957a9d90
[ "MIT" ]
119
2017-07-22T15:02:30.000Z
2021-08-02T10:42:59.000Z
dataviz/timeline_gibraltar.py
Udzu/pudzu
5a0302830b052fc54feba891eb7bf634957a9d90
[ "MIT" ]
null
null
null
dataviz/timeline_gibraltar.py
Udzu/pudzu
5a0302830b052fc54feba891eb7bf634957a9d90
[ "MIT" ]
28
2017-08-04T14:28:41.000Z
2019-11-27T23:46:14.000Z
from pudzu.charts import * from pudzu.dates import * from collections import defaultdict df = pd.read_csv("datasets/timeline_gibraltar.csv") df_events = pd.read_csv("datasets/timeline_gibraltar_events.csv") START, END, INTERVAL = 1000, 2000, 250 PHOTOS = ["http://www.kindredgroup.com/wp-content/uploads/2016/11/gibralta.jpg", "http://www.visitgibraltar.gi/images/site_images/HcZOv_gugu.jpg"] COLORS = { 'm': VegaPalette10.GREEN, 's': VegaPalette10.ORANGE, 'u': VegaPalette10.RED } ICONS = { 's': MaskUnion(..., "white", masks=Image.open("icons/trebuchet.png").to_rgba().resize_fixed_aspect(width=15)) } # timeline def colorfn(d, w, h): return COLORS[d['type']] def labelfn(d, w): s = d['period'].replace("\\n", "\n") if "Medina" in d['period'] and w < 20: return None if "Medina" in d['period'] and w < 50: s = "MS" if "Granada" in d['period'] and w < 50: s = "Gra'da" return Image.from_text(s, arial(10), "white", align="center", padding=2, beard_line=True) def eventfn(d): return ICONS[d['type']].pad(2,0) ilabels = {(TimeChartLabelPosition.BELOW, TimeChartLabelPosition.INSIDE): labelfn, TimeChartLabelPosition.ABOVE: lambda: " ", TimeChartLabelPosition.BELOW: lambda: " "} llabels = ["AD {}".format(start) for start in range(START, END, INTERVAL)] data = [df.filter_rows("start<{} and end>{}".format(start+INTERVAL, start)).update_columns(start=lambda v: v-start, end=lambda v: v-start) for start in range(START, END, INTERVAL)] event_data = [df_events.filter_rows("time<{} and time>={}".format(time+INTERVAL, time)).update_columns(time=lambda v: v-time) for time in range(START, END, INTERVAL)] chart = time_chart(800, 40, data, "start", "end", colorfn, label_font=arial(10), interval_label_key=ilabels, event_data=event_data, event_label_key={TimeChartLabelPosition.ABOVE: eventfn}, xmin=0, xmax=INTERVAL, grid_font=arial(10), grid_labels=lambda v: "+{}".format(v), grid_interval=50, labels_left=llabels).pad(2, bg="black") # legend footer_text = ["control: ", Rectangle(20, COLORS['m']), " 
Muslim ", Rectangle(20, COLORS['s']), " Spanish ", Rectangle(20, COLORS['u']), " British ", " ", ICONS['s'].pad((0,0,0,3),0), " under siege" ] footer = Image.from_multitext(footer_text, [arial(12, bold=True)] + [arial(12)]*(len(footer_text)-1), "white", img_offset=-5) chart = Image.from_column([chart, footer.pad((0,10), 0)], bg="black") # photos images = Image.from_column([Image.from_url_with_cache(u).crop_to_aspect(320,200).resize_fixed_aspect(height=180) for u in PHOTOS]) # bar chart totals = defaultdict(lambda: 0) for _,d in df.iterrows(): totals[d['type']] += d['end'] - d['start'] tot_data = pd.DataFrame([ totals[c] for c in 'msu' ], index=["Muslim", "Spanish", "British"]) tot_palette = [COLORS[c] for c in 'msu'] tot_bar = bar_chart(tot_data, 40, 300, fg="white", bg="black", label_font=arial(12), clabels=BarChartLabelPosition.INSIDE, grid_interval=100, colors=lambda c,r: tot_palette[r], spacing=5) tot_title = Image.from_text("# years of control", arial(16, bold=True), "white") tot_img = Image.from_column([tot_title, tot_bar], padding=5, bg="black") chart = Image.from_row([chart, images, tot_img], bg="black", padding=5) title = Image.from_text("The rock + under siege: a political timeline of Gibraltar".upper(), arial(36, bold=True), "white") img = Image.from_column([title.pad((0,10), 0), chart], bg="black") img.place(Image.from_text("/u/Udzu", font("arial", 12), fg="white", bg="black", padding=3).pad((1,1,0,0), "white"), align=1, padding=(5,5), copy=False) img.save("output/timeline_gibraltar.png")
49.644737
181
0.662073
from pudzu.charts import * from pudzu.dates import * from collections import defaultdict df = pd.read_csv("datasets/timeline_gibraltar.csv") df_events = pd.read_csv("datasets/timeline_gibraltar_events.csv") START, END, INTERVAL = 1000, 2000, 250 PHOTOS = ["http://www.kindredgroup.com/wp-content/uploads/2016/11/gibralta.jpg", "http://www.visitgibraltar.gi/images/site_images/HcZOv_gugu.jpg"] COLORS = { 'm': VegaPalette10.GREEN, 's': VegaPalette10.ORANGE, 'u': VegaPalette10.RED } ICONS = { 's': MaskUnion(..., "white", masks=Image.open("icons/trebuchet.png").to_rgba().resize_fixed_aspect(width=15)) } def colorfn(d, w, h): return COLORS[d['type']] def labelfn(d, w): s = d['period'].replace("\\n", "\n") if "Medina" in d['period'] and w < 20: return None if "Medina" in d['period'] and w < 50: s = "MS" if "Granada" in d['period'] and w < 50: s = "Gra'da" return Image.from_text(s, arial(10), "white", align="center", padding=2, beard_line=True) def eventfn(d): return ICONS[d['type']].pad(2,0) ilabels = {(TimeChartLabelPosition.BELOW, TimeChartLabelPosition.INSIDE): labelfn, TimeChartLabelPosition.ABOVE: lambda: " ", TimeChartLabelPosition.BELOW: lambda: " "} llabels = ["AD {}".format(start) for start in range(START, END, INTERVAL)] data = [df.filter_rows("start<{} and end>{}".format(start+INTERVAL, start)).update_columns(start=lambda v: v-start, end=lambda v: v-start) for start in range(START, END, INTERVAL)] event_data = [df_events.filter_rows("time<{} and time>={}".format(time+INTERVAL, time)).update_columns(time=lambda v: v-time) for time in range(START, END, INTERVAL)] chart = time_chart(800, 40, data, "start", "end", colorfn, label_font=arial(10), interval_label_key=ilabels, event_data=event_data, event_label_key={TimeChartLabelPosition.ABOVE: eventfn}, xmin=0, xmax=INTERVAL, grid_font=arial(10), grid_labels=lambda v: "+{}".format(v), grid_interval=50, labels_left=llabels).pad(2, bg="black") # legend footer_text = ["control: ", Rectangle(20, COLORS['m']), " Muslim ", 
Rectangle(20, COLORS['s']), " Spanish ", Rectangle(20, COLORS['u']), " British ", " ", ICONS['s'].pad((0,0,0,3),0), " under siege" ] footer = Image.from_multitext(footer_text, [arial(12, bold=True)] + [arial(12)]*(len(footer_text)-1), "white", img_offset=-5) chart = Image.from_column([chart, footer.pad((0,10), 0)], bg="black") # photos images = Image.from_column([Image.from_url_with_cache(u).crop_to_aspect(320,200).resize_fixed_aspect(height=180) for u in PHOTOS]) # bar chart totals = defaultdict(lambda: 0) for _,d in df.iterrows(): totals[d['type']] += d['end'] - d['start'] tot_data = pd.DataFrame([ totals[c] for c in 'msu' ], index=["Muslim", "Spanish", "British"]) tot_palette = [COLORS[c] for c in 'msu'] tot_bar = bar_chart(tot_data, 40, 300, fg="white", bg="black", label_font=arial(12), clabels=BarChartLabelPosition.INSIDE, grid_interval=100, colors=lambda c,r: tot_palette[r], spacing=5) tot_title = Image.from_text("# years of control", arial(16, bold=True), "white") tot_img = Image.from_column([tot_title, tot_bar], padding=5, bg="black") chart = Image.from_row([chart, images, tot_img], bg="black", padding=5) title = Image.from_text("The rock + under siege: a political timeline of Gibraltar".upper(), arial(36, bold=True), "white") img = Image.from_column([title.pad((0,10), 0), chart], bg="black") img.place(Image.from_text("/u/Udzu", font("arial", 12), fg="white", bg="black", padding=3).pad((1,1,0,0), "white"), align=1, padding=(5,5), copy=False) img.save("output/timeline_gibraltar.png")
true
true
1c45cf4bdee098de3ed2c46a413ab004e8e94cbf
1,436
py
Python
tests/test_tools_jobinfo.py
NERSC/pytokio
22244718cf82567c50620cbe0e635dfc990de36b
[ "BSD-3-Clause-LBNL" ]
22
2017-11-14T01:30:48.000Z
2022-01-01T21:51:00.000Z
tests/test_tools_jobinfo.py
glennklockwood/pytokio
22244718cf82567c50620cbe0e635dfc990de36b
[ "BSD-3-Clause-LBNL" ]
39
2017-12-20T01:42:19.000Z
2020-05-28T21:17:26.000Z
tests/test_tools_jobinfo.py
glennklockwood/pytokio
22244718cf82567c50620cbe0e635dfc990de36b
[ "BSD-3-Clause-LBNL" ]
5
2018-02-06T19:39:19.000Z
2019-07-10T01:20:26.000Z
"""Test jobinfo and all supported backends """ import tokio.tools.jobinfo import tokiotest def test_get_job_startend_slurm(): """tools.jobinfo.get_job_startend, Slurm """ tokio.config.CONFIG["jobinfo_jobid_providers"] = ["slurm"] start, end = tokio.tools.jobinfo.get_job_startend( jobid=tokiotest.SAMPLE_DARSHAN_JOBID, cache_file=tokiotest.SAMPLE_SLURM_CACHE_FILE) print(start, end) print(type(start), type(end)) assert start assert end assert start <= end def test_get_job_startend_nerscjobsdb(): """tools.jobinfo.get_job_startend, NerscJobsDb """ tokio.config.CONFIG["jobinfo_jobid_providers"] = ["nersc_jobsdb"] start, end = tokio.tools.jobinfo.get_job_startend( jobid=tokiotest.SAMPLE_DARSHAN_JOBID, cache_file=tokiotest.SAMPLE_NERSCJOBSDB_FILE) print(start, end) print(type(start), type(end)) assert start assert end assert start <= end def test_get_job_nodes_slurm(): """tools.jobinfo.get_job_nodes, Slurm """ tokio.config.CONFIG["jobinfo_jobnodes_providers"] = ["slurm"] jobnodes = tokio.tools.jobinfo.get_job_nodes( jobid=tokiotest.SAMPLE_DARSHAN_JOBID, cache_file=tokiotest.SAMPLE_SLURM_CACHE_FILE) print(type(jobnodes), jobnodes) assert jobnodes if __name__ == "__main__": test_get_job_startend_slurm() test_get_job_startend_nerscjobsdb() test_get_job_nodes_slurm()
31.217391
69
0.722145
import tokio.tools.jobinfo import tokiotest def test_get_job_startend_slurm(): tokio.config.CONFIG["jobinfo_jobid_providers"] = ["slurm"] start, end = tokio.tools.jobinfo.get_job_startend( jobid=tokiotest.SAMPLE_DARSHAN_JOBID, cache_file=tokiotest.SAMPLE_SLURM_CACHE_FILE) print(start, end) print(type(start), type(end)) assert start assert end assert start <= end def test_get_job_startend_nerscjobsdb(): tokio.config.CONFIG["jobinfo_jobid_providers"] = ["nersc_jobsdb"] start, end = tokio.tools.jobinfo.get_job_startend( jobid=tokiotest.SAMPLE_DARSHAN_JOBID, cache_file=tokiotest.SAMPLE_NERSCJOBSDB_FILE) print(start, end) print(type(start), type(end)) assert start assert end assert start <= end def test_get_job_nodes_slurm(): tokio.config.CONFIG["jobinfo_jobnodes_providers"] = ["slurm"] jobnodes = tokio.tools.jobinfo.get_job_nodes( jobid=tokiotest.SAMPLE_DARSHAN_JOBID, cache_file=tokiotest.SAMPLE_SLURM_CACHE_FILE) print(type(jobnodes), jobnodes) assert jobnodes if __name__ == "__main__": test_get_job_startend_slurm() test_get_job_startend_nerscjobsdb() test_get_job_nodes_slurm()
true
true
1c45cf7be7c3d2e904239c5a45cec80098ce6554
78
py
Python
wmf/dump/__init__.py
maribelacosta/wikiwho
5c53f129b018541aad0cc63be5e03a862e6183a1
[ "MIT" ]
17
2015-01-04T15:17:15.000Z
2019-09-17T15:38:43.000Z
wmf/dump/__init__.py
maribelacosta/wikiwho
5c53f129b018541aad0cc63be5e03a862e6183a1
[ "MIT" ]
5
2015-06-03T09:07:40.000Z
2017-03-31T16:36:13.000Z
wmf/dump/__init__.py
maribelacosta/wikiwho
5c53f129b018541aad0cc63be5e03a862e6183a1
[ "MIT" ]
10
2015-02-11T11:50:11.000Z
2021-07-28T02:17:16.000Z
from .iterator import Iterator from .map import map from .map import dumpFile
19.5
30
0.807692
from .iterator import Iterator from .map import map from .map import dumpFile
true
true
1c45cfcca17602a353dfd446b147a8f1cf0251e7
4,479
py
Python
tests/samples.py
chikko80/bit
af557cde90c9021ee16024ab89a000961c6062b4
[ "MIT" ]
1,173
2016-11-30T19:45:44.000Z
2022-03-31T15:43:58.000Z
tests/samples.py
chikko80/bit
af557cde90c9021ee16024ab89a000961c6062b4
[ "MIT" ]
155
2017-03-17T13:06:42.000Z
2022-02-28T16:59:14.000Z
tests/samples.py
chikko80/bit
af557cde90c9021ee16024ab89a000961c6062b4
[ "MIT" ]
197
2017-02-16T04:30:29.000Z
2022-03-24T09:38:29.000Z
import os BINARY_ADDRESS = b'\x00\x92F\x1b\xdeb\x83\xb4a\xec\xe7\xdd\xf4\xdb\xf1\xe0\xa4\x8b\xd1\x13\xd8&E\xb4\xbf' BITCOIN_ADDRESS = '1ELReFsTCUY2mfaDTy32qxYiT49z786eFg' BITCOIN_ADDRESS_COMPRESSED = '1ExJJsNLQDNVVM1s1sdyt1o5P3GC5r32UG' BITCOIN_ADDRESS_NP2WKH = '3291hXxutb58vbDVVumaJpopanmfxjVpgJ' BITCOIN_ADDRESS_PAY2SH = '39SrGQEfFXcTYJhBvjZeQja66Cpz82EEUn' BITCOIN_ADDRESS_P2SH_MULTISIG = '3LtE4jSa7MDD1GEJGEBYjntYQB9KcqzRCG' BITCOIN_ADDRESS_NP2SH_MULTISIG = '32syayPneML4mTuMfimt6ZSLPz87Fya1r6' BITCOIN_ADDRESS_TEST = 'mtrNwJxS1VyHYn3qBY1Qfsm3K3kh1mGRMS' BITCOIN_ADDRESS_TEST_COMPRESSED = 'muUFbvTKDEokGTVUjScMhw1QF2rtv5hxCz' BITCOIN_ADDRESS_TEST_NP2WKH = '2MshDmGtwW3aV8Nr3B3PSvmo5o8yqhwJFKZ' BITCOIN_ADDRESS_TEST_PAY2SH = '2NFKbBHzzh32q5DcZJNgZE9sF7gYmtPbawk' BITCOIN_ADDRESS_TEST_P2SH_MULTISIG = '2NFEYyojDi7eFSZPwBRxSQPn1NPnS9p5upP' BITCOIN_ADDRESS_TEST_NP2SH_MULTISIG = '2NCWeVbWmaUp92dSFP3RddPk6r3GTd6cDd6' BITCOIN_SEGWIT_ADDRESS = 'bc1qar0srrr7xfkvy5l643lydnw9re59gtzzwf5mdq' BITCOIN_SEGWIT_HASH = 'e8df018c7e326cc253faac7e46cdc51e68542c42' BITCOIN_SEGWIT_ADDRESS_TEST = 'tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx' BITCOIN_SEGWIT_HASH_TEST = '751e76e8199196d454941c45d1b3a323f1433bd6' BITCOIN_SEGWIT_ADDRESS_PAY2SH = 'bc1qc7slrfxkknqcq2jevvvkdgvrt8080852dfjewde450xdlk4ugp7szw5tk9' BITCOIN_SEGWIT_HASH_PAY2SH = 'c7a1f1a4d6b4c1802a59631966a18359de779e8a6a65973735a3ccdfdabc407d' BITCOIN_SEGWIT_ADDRESS_TEST_PAY2SH = 'tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7' BITCOIN_SEGWIT_HASH_TEST_PAY2SH = '1863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262' PAY2SH_HASH = b'U\x13\x1e\xfbz\x0e\xddLv\xcc;\xbe\x83;\xfcY\xa6\xf7<k' PAY2SH_TEST_HASH = b'\xf2&\x1e\x95d\xc9\xdf\xff\xa8\x15\x05\xc1S\xfb\x95\xbf\x93\x99C\x08' PRIVATE_KEY_BYTES = b'\xc2\x8a\x9f\x80s\x8fw\rRx\x03\xa5f\xcfo\xc3\xed\xf6\xce\xa5\x86\xc4\xfcJR#\xa5\xady~\x1a\xc3' PRIVATE_KEY_DER = ( b"0\x81\x84\x02\x01\x000\x10\x06\x07*\x86H\xce=\x02\x01\x06" b"\x05+\x81\x04\x00\n\x04m0k\x02\x01\x01\x04 
\xc2\x8a\x9f" b"\x80s\x8fw\rRx\x03\xa5f\xcfo\xc3\xed\xf6\xce\xa5\x86\xc4" b"\xfcJR#\xa5\xady~\x1a\xc3\xa1D\x03B\x00\x04=\\(u\xc9\xbd" b"\x11hu\xa7\x1a]\xb6L\xff\xcb\x139k\x16=\x03\x9b\x1d\x93'" b"\x82H\x91\x80C4v\xa45**\xdd\x00\xeb\xb0\xd5\xc9LQ[r\xeb" b"\x10\xf1\xfd\x8f?\x03\xb4/J+%[\xfc\x9a\xa9\xe3" ) PRIVATE_KEY_HEX = 'c28a9f80738f770d527803a566cf6fc3edf6cea586c4fc4a5223a5ad797e1ac3' PRIVATE_KEY_NUM = 87993618360805341115891506172036624893404292644470266399436498750715784469187 PRIVATE_KEY_PEM = ( b'-----BEGIN PRIVATE KEY-----\n' b'MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgwoqfgHOPdw1SeAOlZs9v\n' b'w+32zqWGxPxKUiOlrXl+GsOhRANCAAQ9XCh1yb0RaHWnGl22TP/LEzlrFj0Dmx2T\n' b'J4JIkYBDNHakNSoq3QDrsNXJTFFbcusQ8f2PPwO0L0orJVv8mqnj\n' b'-----END PRIVATE KEY-----\n' ) PUBKEY_HASH = b'\x92F\x1b\xdeb\x83\xb4a\xec\xe7\xdd\xf4\xdb\xf1\xe0\xa4\x8b\xd1\x13\xd8' PUBKEY_HASH_HEX = '043d5c2875c9bd116875a71a5db64cffcb13396b163d039b1d932782489180433476a4352a2add00ebb0d5c94c515b72eb10f1fd8f3f03b42f4a2b255bfc9aa9e3' PUBKEY_HASH_COMPRESSED = b'\x99\x0e\xf6\rc\xb5\xb5\x96J\x1c"\x82\x06\x1a\xf4Q#\xe9?\xcb' PUBLIC_KEY_COMPRESSED = b"\x03=\\(u\xc9\xbd\x11hu\xa7\x1a]\xb6L\xff\xcb\x139k\x16=\x03\x9b\x1d\x93'\x82H\x91\x80C4" PUBLIC_KEY_UNCOMPRESSED = ( b"\x04=\\(u\xc9\xbd\x11hu\xa7\x1a]\xb6L\xff\xcb\x139k\x16=\x03" b"\x9b\x1d\x93'\x82H\x91\x80C4v\xa45**\xdd\x00\xeb\xb0\xd5\xc9" b"LQ[r\xeb\x10\xf1\xfd\x8f?\x03\xb4/J+%[\xfc\x9a\xa9\xe3" ) PUBLIC_KEY_X = 27753912938952041417634381842191885283234814940840273460372041880794577257268 PUBLIC_KEY_Y = 53663045980837260634637807506183816949039230809110041985901491152185762425315 WALLET_FORMAT_COMPRESSED_MAIN = 'L3jsepcttyuJK3HKezD4qqRKGtwc8d2d1Nw6vsoPDX9cMcUxqqMv' WALLET_FORMAT_COMPRESSED_TEST = 'cU6s7jckL3bZUUkb3Q2CD9vNu8F1o58K5R5a3JFtidoccMbhEGKZ' WALLET_FORMAT_COMPRESSED_SEND_TEST = os.environ.get('WALLET_FORMAT_COMPRESSED_SEND_TEST', '') WALLET_FORMAT_MAIN = '5KHxtARu5yr1JECrYGEA2YpCPdh1i9ciEgQayAF8kcqApkGzT9s' WALLET_FORMAT_MAIN_1 = 
'L3MWPaUtPpnBx7QZtMTrcKz437JFCLzz8GQ916UAZtk3P51w7cpo' WALLET_FORMAT_MAIN_2 = 'Kxat4TMUAv3f7H4g52NKbAyEncj72h3wujGVfoGhU52WdQxrZuFd' WALLET_FORMAT_TEST = '934bTuFSgCv9GHi9Ac84u9NA3J3isK9uadGY3nbe6MaDbnQdcbn' WALLET_FORMAT_SEND_TEST = os.environ.get('WALLET_FORMAT_SEND_TEST', '') WALLET_FORMAT_TEST_1 = 'KzDFE5K1Mb6cP2SAUySLUMx8F8KWn5RYiUFTFXcDMY5X22Jp1MvH' WALLET_FORMAT_TEST_2 = 'KxhDjpU1TWDbD2ukWpTcj9T55bkKVJsyn2hcDucCecpjApetCRG9'
67.863636
150
0.841036
import os BINARY_ADDRESS = b'\x00\x92F\x1b\xdeb\x83\xb4a\xec\xe7\xdd\xf4\xdb\xf1\xe0\xa4\x8b\xd1\x13\xd8&E\xb4\xbf' BITCOIN_ADDRESS = '1ELReFsTCUY2mfaDTy32qxYiT49z786eFg' BITCOIN_ADDRESS_COMPRESSED = '1ExJJsNLQDNVVM1s1sdyt1o5P3GC5r32UG' BITCOIN_ADDRESS_NP2WKH = '3291hXxutb58vbDVVumaJpopanmfxjVpgJ' BITCOIN_ADDRESS_PAY2SH = '39SrGQEfFXcTYJhBvjZeQja66Cpz82EEUn' BITCOIN_ADDRESS_P2SH_MULTISIG = '3LtE4jSa7MDD1GEJGEBYjntYQB9KcqzRCG' BITCOIN_ADDRESS_NP2SH_MULTISIG = '32syayPneML4mTuMfimt6ZSLPz87Fya1r6' BITCOIN_ADDRESS_TEST = 'mtrNwJxS1VyHYn3qBY1Qfsm3K3kh1mGRMS' BITCOIN_ADDRESS_TEST_COMPRESSED = 'muUFbvTKDEokGTVUjScMhw1QF2rtv5hxCz' BITCOIN_ADDRESS_TEST_NP2WKH = '2MshDmGtwW3aV8Nr3B3PSvmo5o8yqhwJFKZ' BITCOIN_ADDRESS_TEST_PAY2SH = '2NFKbBHzzh32q5DcZJNgZE9sF7gYmtPbawk' BITCOIN_ADDRESS_TEST_P2SH_MULTISIG = '2NFEYyojDi7eFSZPwBRxSQPn1NPnS9p5upP' BITCOIN_ADDRESS_TEST_NP2SH_MULTISIG = '2NCWeVbWmaUp92dSFP3RddPk6r3GTd6cDd6' BITCOIN_SEGWIT_ADDRESS = 'bc1qar0srrr7xfkvy5l643lydnw9re59gtzzwf5mdq' BITCOIN_SEGWIT_HASH = 'e8df018c7e326cc253faac7e46cdc51e68542c42' BITCOIN_SEGWIT_ADDRESS_TEST = 'tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx' BITCOIN_SEGWIT_HASH_TEST = '751e76e8199196d454941c45d1b3a323f1433bd6' BITCOIN_SEGWIT_ADDRESS_PAY2SH = 'bc1qc7slrfxkknqcq2jevvvkdgvrt8080852dfjewde450xdlk4ugp7szw5tk9' BITCOIN_SEGWIT_HASH_PAY2SH = 'c7a1f1a4d6b4c1802a59631966a18359de779e8a6a65973735a3ccdfdabc407d' BITCOIN_SEGWIT_ADDRESS_TEST_PAY2SH = 'tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7' BITCOIN_SEGWIT_HASH_TEST_PAY2SH = '1863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262' PAY2SH_HASH = b'U\x13\x1e\xfbz\x0e\xddLv\xcc;\xbe\x83;\xfcY\xa6\xf7<k' PAY2SH_TEST_HASH = b'\xf2&\x1e\x95d\xc9\xdf\xff\xa8\x15\x05\xc1S\xfb\x95\xbf\x93\x99C\x08' PRIVATE_KEY_BYTES = b'\xc2\x8a\x9f\x80s\x8fw\rRx\x03\xa5f\xcfo\xc3\xed\xf6\xce\xa5\x86\xc4\xfcJR#\xa5\xady~\x1a\xc3' PRIVATE_KEY_DER = ( b"0\x81\x84\x02\x01\x000\x10\x06\x07*\x86H\xce=\x02\x01\x06" b"\x05+\x81\x04\x00\n\x04m0k\x02\x01\x01\x04 
\xc2\x8a\x9f" b"\x80s\x8fw\rRx\x03\xa5f\xcfo\xc3\xed\xf6\xce\xa5\x86\xc4" b"\xfcJR#\xa5\xady~\x1a\xc3\xa1D\x03B\x00\x04=\\(u\xc9\xbd" b"\x11hu\xa7\x1a]\xb6L\xff\xcb\x139k\x16=\x03\x9b\x1d\x93'" b"\x82H\x91\x80C4v\xa45**\xdd\x00\xeb\xb0\xd5\xc9LQ[r\xeb" b"\x10\xf1\xfd\x8f?\x03\xb4/J+%[\xfc\x9a\xa9\xe3" ) PRIVATE_KEY_HEX = 'c28a9f80738f770d527803a566cf6fc3edf6cea586c4fc4a5223a5ad797e1ac3' PRIVATE_KEY_NUM = 87993618360805341115891506172036624893404292644470266399436498750715784469187 PRIVATE_KEY_PEM = ( b'-----BEGIN PRIVATE KEY-----\n' b'MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgwoqfgHOPdw1SeAOlZs9v\n' b'w+32zqWGxPxKUiOlrXl+GsOhRANCAAQ9XCh1yb0RaHWnGl22TP/LEzlrFj0Dmx2T\n' b'J4JIkYBDNHakNSoq3QDrsNXJTFFbcusQ8f2PPwO0L0orJVv8mqnj\n' b'-----END PRIVATE KEY-----\n' ) PUBKEY_HASH = b'\x92F\x1b\xdeb\x83\xb4a\xec\xe7\xdd\xf4\xdb\xf1\xe0\xa4\x8b\xd1\x13\xd8' PUBKEY_HASH_HEX = '043d5c2875c9bd116875a71a5db64cffcb13396b163d039b1d932782489180433476a4352a2add00ebb0d5c94c515b72eb10f1fd8f3f03b42f4a2b255bfc9aa9e3' PUBKEY_HASH_COMPRESSED = b'\x99\x0e\xf6\rc\xb5\xb5\x96J\x1c"\x82\x06\x1a\xf4Q#\xe9?\xcb' PUBLIC_KEY_COMPRESSED = b"\x03=\\(u\xc9\xbd\x11hu\xa7\x1a]\xb6L\xff\xcb\x139k\x16=\x03\x9b\x1d\x93'\x82H\x91\x80C4" PUBLIC_KEY_UNCOMPRESSED = ( b"\x04=\\(u\xc9\xbd\x11hu\xa7\x1a]\xb6L\xff\xcb\x139k\x16=\x03" b"\x9b\x1d\x93'\x82H\x91\x80C4v\xa45**\xdd\x00\xeb\xb0\xd5\xc9" b"LQ[r\xeb\x10\xf1\xfd\x8f?\x03\xb4/J+%[\xfc\x9a\xa9\xe3" ) PUBLIC_KEY_X = 27753912938952041417634381842191885283234814940840273460372041880794577257268 PUBLIC_KEY_Y = 53663045980837260634637807506183816949039230809110041985901491152185762425315 WALLET_FORMAT_COMPRESSED_MAIN = 'L3jsepcttyuJK3HKezD4qqRKGtwc8d2d1Nw6vsoPDX9cMcUxqqMv' WALLET_FORMAT_COMPRESSED_TEST = 'cU6s7jckL3bZUUkb3Q2CD9vNu8F1o58K5R5a3JFtidoccMbhEGKZ' WALLET_FORMAT_COMPRESSED_SEND_TEST = os.environ.get('WALLET_FORMAT_COMPRESSED_SEND_TEST', '') WALLET_FORMAT_MAIN = '5KHxtARu5yr1JECrYGEA2YpCPdh1i9ciEgQayAF8kcqApkGzT9s' WALLET_FORMAT_MAIN_1 = 
'L3MWPaUtPpnBx7QZtMTrcKz437JFCLzz8GQ916UAZtk3P51w7cpo' WALLET_FORMAT_MAIN_2 = 'Kxat4TMUAv3f7H4g52NKbAyEncj72h3wujGVfoGhU52WdQxrZuFd' WALLET_FORMAT_TEST = '934bTuFSgCv9GHi9Ac84u9NA3J3isK9uadGY3nbe6MaDbnQdcbn' WALLET_FORMAT_SEND_TEST = os.environ.get('WALLET_FORMAT_SEND_TEST', '') WALLET_FORMAT_TEST_1 = 'KzDFE5K1Mb6cP2SAUySLUMx8F8KWn5RYiUFTFXcDMY5X22Jp1MvH' WALLET_FORMAT_TEST_2 = 'KxhDjpU1TWDbD2ukWpTcj9T55bkKVJsyn2hcDucCecpjApetCRG9'
true
true
1c45cff204c2083e87e0c5b405062242ac5f4e29
2,254
py
Python
.config/qutebrowser/config.py
SqrtMinusOne/dotfiles
1121bd865cb9ed019e9e4c257155e2fb483d98c5
[ "Apache-2.0" ]
12
2021-05-01T11:08:55.000Z
2022-03-27T05:57:02.000Z
.config/qutebrowser/config.py
SqrtMinusOne/dotfiles
1121bd865cb9ed019e9e4c257155e2fb483d98c5
[ "Apache-2.0" ]
1
2022-02-13T14:54:29.000Z
2022-02-13T15:42:55.000Z
.config/qutebrowser/config.py
SqrtMinusOne/dotfiles
1121bd865cb9ed019e9e4c257155e2fb483d98c5
[ "Apache-2.0" ]
4
2021-05-22T21:31:28.000Z
2022-03-30T21:28:33.000Z
import os import dracula.draw from qutebrowser.api import interceptor def filter_yt(info: interceptor.Request): """Block the given request if necessary.""" url = info.request_url if (url.host() == 'www.youtube.com' and url.path() == '/get_video_info' and '&adformat=' in url.query()): info.block() interceptor.register(filter_yt) config.load_autoconfig() config.unbind('gt', mode='normal') config.bind('gt', 'tab-next') config.bind('gT', 'tab-prev') config.bind('gN', 'tab-close') config.bind('gn', 'tab-clone') c.fonts.default_size = '10pt' c.fonts.default_family = 'monospace' c.fonts.web.size.default_fixed = 13 # config.unbind('T', mode='normal') # config.bind('T', 'set-cmd-text -s :buffer') # c.content.javascript.enabled = False config.bind('\\t', 'set-cmd-text -s :buffer') config.bind('\\b', 'set-cmd-text -s :bookmark-load') config.bind('\\ww', ':open file:///home/pavel/Documents/org-mode/Bookmarks/bookmarks.html') config.bind('\\z1', 'set zoom.default 100 ;; set fonts.default_size 10pt') config.bind('\\z2', 'set zoom.default 125 ;; set fonts.default_size 12pt') if c.colors.webpage.darkmode.enabled: config.bind('\\d', 'set colors.webpage.darkmode.enabled False ;; restart') else: config.bind('\\d', 'set colors.webpage.darkmode.enabled True ;; restart') # config.unbind('<Escape>', mode='insert') config.bind('<Shift-Escape>', 'fake-key <Escape>', mode='insert') RUSSIAN = 'йцукенгшщзхъфывапролджэячсмитьбю.' 
ENGLISH = 'qwertyuiop[]asdfghjkl;\'zxcvbnm,./' c.bindings.key_mappings = { **{r: e for r, e in zip(RUSSIAN, ENGLISH)}, **{r.upper(): e.upper() for r, e in zip(RUSSIAN, ENGLISH)} } c.editor.command = [ 'nvim', '-f', '{file}', '-c', 'normal {line}G{column0}l' ] c.scrolling.bar = 'always' c.url.searchengines = { "DEFAULT": "https://duckduckgo.com/?q={}", "g": "https://www.google.com/search?hl=en&q={}" } c.zoom.levels = ['25%', '33%', '50%', '67%', '75%', '90%', '100%', '110%', '125%', '133%', '150%', '175%', '200%', '250%', '300%', '400%', '500%'] if os.uname().nodename == 'pavel-ntk': c.zoom.default = '133%' dracula.draw.blood(c, { 'spacing': { 'vertical': 6, 'horizontal': 8 } })
26.517647
91
0.624667
import os import dracula.draw from qutebrowser.api import interceptor def filter_yt(info: interceptor.Request): url = info.request_url if (url.host() == 'www.youtube.com' and url.path() == '/get_video_info' and '&adformat=' in url.query()): info.block() interceptor.register(filter_yt) config.load_autoconfig() config.unbind('gt', mode='normal') config.bind('gt', 'tab-next') config.bind('gT', 'tab-prev') config.bind('gN', 'tab-close') config.bind('gn', 'tab-clone') c.fonts.default_size = '10pt' c.fonts.default_family = 'monospace' c.fonts.web.size.default_fixed = 13 config.bind('\\t', 'set-cmd-text -s :buffer') config.bind('\\b', 'set-cmd-text -s :bookmark-load') config.bind('\\ww', ':open file:///home/pavel/Documents/org-mode/Bookmarks/bookmarks.html') config.bind('\\z1', 'set zoom.default 100 ;; set fonts.default_size 10pt') config.bind('\\z2', 'set zoom.default 125 ;; set fonts.default_size 12pt') if c.colors.webpage.darkmode.enabled: config.bind('\\d', 'set colors.webpage.darkmode.enabled False ;; restart') else: config.bind('\\d', 'set colors.webpage.darkmode.enabled True ;; restart') config.bind('<Shift-Escape>', 'fake-key <Escape>', mode='insert') RUSSIAN = 'йцукенгшщзхъфывапролджэячсмитьбю.' ENGLISH = 'qwertyuiop[]asdfghjkl;\'zxcvbnm,./' c.bindings.key_mappings = { **{r: e for r, e in zip(RUSSIAN, ENGLISH)}, **{r.upper(): e.upper() for r, e in zip(RUSSIAN, ENGLISH)} } c.editor.command = [ 'nvim', '-f', '{file}', '-c', 'normal {line}G{column0}l' ] c.scrolling.bar = 'always' c.url.searchengines = { "DEFAULT": "https://duckduckgo.com/?q={}", "g": "https://www.google.com/search?hl=en&q={}" } c.zoom.levels = ['25%', '33%', '50%', '67%', '75%', '90%', '100%', '110%', '125%', '133%', '150%', '175%', '200%', '250%', '300%', '400%', '500%'] if os.uname().nodename == 'pavel-ntk': c.zoom.default = '133%' dracula.draw.blood(c, { 'spacing': { 'vertical': 6, 'horizontal': 8 } })
true
true
1c45d042e89a5bb966939c08622d51ac265a1ecd
3,127
py
Python
tmt/steps/report/junit.py
KwisatzHaderach/tmt
75ff90a543240d39c45baa849e6a3149545be0fd
[ "MIT" ]
null
null
null
tmt/steps/report/junit.py
KwisatzHaderach/tmt
75ff90a543240d39c45baa849e6a3149545be0fd
[ "MIT" ]
null
null
null
tmt/steps/report/junit.py
KwisatzHaderach/tmt
75ff90a543240d39c45baa849e6a3149545be0fd
[ "MIT" ]
null
null
null
import os import click import tmt import tmt.steps.report DEFAULT_NAME = "junit.xml" def import_junit_xml(): """ Import junit_xml module only when needed Until we have a separate package for each plugin. """ global junit_xml try: import junit_xml except ImportError: raise tmt.utils.ReportError( "Missing 'junit-xml', fixable by 'pip install tmt[report-junit]'.") def duration_to_seconds(duration): """ Convert valid duration string in to seconds """ if duration is None: return None try: h, m, s = duration.split(':') return int(h) * 3600 + int(m) * 60 + int(s) except Exception as error: raise tmt.utils.ReportError( f"Malformed duration '{duration}' ({error}).") class ReportJUnit(tmt.steps.report.ReportPlugin): """ Write test results in JUnit format When FILE is not specified output is written to the 'junit.xml' located in the current workdir. """ # Supported methods _methods = [tmt.steps.Method(name='junit', doc=__doc__, order=50)] # Supported keys _keys = ["file"] @classmethod def options(cls, how=None): """ Prepare command line options for connect """ return [ click.option( '--file', metavar='FILE', help='Path to the file to store junit to'), ] + super().options(how) def go(self): """ Read executed tests and write junit """ super().go() import_junit_xml() suite = junit_xml.TestSuite(self.step.plan.name) for result in self.step.plan.execute.results(): try: main_log = self.step.plan.execute.read(result.log[0]) except (IndexError, AttributeError): main_log = None case = junit_xml.TestCase( result.name, classname=None, elapsed_sec=duration_to_seconds(result.duration), stdout=main_log ) # Map tmt OUTCOME to JUnit states if result.result == "error": case.add_error_info(result.result) elif result.result == "fail": case.add_failure_info(result.result) elif result.result == "info": case.add_skipped_info(result.result) elif result.result == "warn": case.add_error_info(result.result) # Passed state is the default suite.test_cases.append(case) f_path = self.opt("file", 
os.path.join(self.workdir, DEFAULT_NAME)) try: with open(f_path, 'w') as fw: if hasattr(junit_xml, 'to_xml_report_file'): junit_xml.to_xml_report_file(fw, [suite]) else: # For older junit-xml junit_xml.TestSuite.to_file(fw, [suite]) self.info("output", f_path, 'yellow') except Exception as error: raise tmt.utils.ReportError( f"Failed to write the output '{f_path}' ({error}).")
30.960396
79
0.568276
import os import click import tmt import tmt.steps.report DEFAULT_NAME = "junit.xml" def import_junit_xml(): global junit_xml try: import junit_xml except ImportError: raise tmt.utils.ReportError( "Missing 'junit-xml', fixable by 'pip install tmt[report-junit]'.") def duration_to_seconds(duration): if duration is None: return None try: h, m, s = duration.split(':') return int(h) * 3600 + int(m) * 60 + int(s) except Exception as error: raise tmt.utils.ReportError( f"Malformed duration '{duration}' ({error}).") class ReportJUnit(tmt.steps.report.ReportPlugin): _methods = [tmt.steps.Method(name='junit', doc=__doc__, order=50)] _keys = ["file"] @classmethod def options(cls, how=None): return [ click.option( '--file', metavar='FILE', help='Path to the file to store junit to'), ] + super().options(how) def go(self): super().go() import_junit_xml() suite = junit_xml.TestSuite(self.step.plan.name) for result in self.step.plan.execute.results(): try: main_log = self.step.plan.execute.read(result.log[0]) except (IndexError, AttributeError): main_log = None case = junit_xml.TestCase( result.name, classname=None, elapsed_sec=duration_to_seconds(result.duration), stdout=main_log ) if result.result == "error": case.add_error_info(result.result) elif result.result == "fail": case.add_failure_info(result.result) elif result.result == "info": case.add_skipped_info(result.result) elif result.result == "warn": case.add_error_info(result.result) suite.test_cases.append(case) f_path = self.opt("file", os.path.join(self.workdir, DEFAULT_NAME)) try: with open(f_path, 'w') as fw: if hasattr(junit_xml, 'to_xml_report_file'): junit_xml.to_xml_report_file(fw, [suite]) else: junit_xml.TestSuite.to_file(fw, [suite]) self.info("output", f_path, 'yellow') except Exception as error: raise tmt.utils.ReportError( f"Failed to write the output '{f_path}' ({error}).")
true
true
1c45d09ac800551f95112d696ee3ba6ef9d53511
5,786
py
Python
packaging/dicarlo/sanghavi/sanghavimurty2020things1.py
dmayo/brain-score
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
[ "MIT" ]
52
2019-12-13T06:43:44.000Z
2022-02-21T07:47:39.000Z
packaging/dicarlo/sanghavi/sanghavimurty2020things1.py
dmayo/brain-score
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
[ "MIT" ]
104
2019-12-06T18:08:54.000Z
2022-03-31T23:57:51.000Z
packaging/dicarlo/sanghavi/sanghavimurty2020things1.py
dmayo/brain-score
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
[ "MIT" ]
32
2019-12-05T14:31:14.000Z
2022-03-10T02:04:45.000Z
import os from pathlib import Path import json import numpy as np import xarray as xr import pandas as pd from brainio_base.assemblies import NeuronRecordingAssembly from brainio_base.stimuli import StimulusSet from brainio_collection.packaging import package_data_assembly, package_stimulus_set from mkgu_packaging.dicarlo.sanghavi import filter_neuroids def collect_stimuli(data_dir): image_dir = data_dir / 'images' / 'things-1' assert os.path.isdir(image_dir) files = sorted(os.listdir(image_dir), key=lambda x: int(os.path.splitext(x)[0])) files = files[:-130] # Discard last 130 images (5 grey and 25x5 normalizer images) assert os.path.isdir(data_dir / 'image-metadata') stimuli = pd.read_csv(data_dir / 'image-metadata' / 'things_1_metadata.csv') stimuli = stimuli.rename(columns={'id': 'image_id'}) stimuli['image_current_local_file_path'] = stimuli.apply( lambda row: os.path.join(image_dir, str(row.image_id) + '.jpg'), axis=1) assert len(np.unique(stimuli['image_id'])) == len(stimuli) stimuli = StimulusSet(stimuli) stimuli.image_paths = \ {stimuli.at[idx, 'image_id']: stimuli.at[idx, 'image_current_local_file_path'] for idx in range(len(stimuli))} return stimuli def load_responses(data_dir, stimuli): data_dir = data_dir / 'database' assert os.path.isdir(data_dir) psth = np.load(data_dir / 'solo.rsvp.things-1.experiment_psth.npy') # Shaped images x repetitions x time_bins x channels # Compute firing rate for given time bins timebins = [[70, 170], [170, 270], [50, 100], [100, 150], [150, 200], [200, 250], [70, 270]] photodiode_delay = 30 # Delay recorded on photodiode is ~30ms timebase = np.arange(-100, 381, 10) # PSTH from -100ms to 380ms relative to stimulus onset assert len(timebase) == psth.shape[2] rate = np.empty((len(timebins), psth.shape[0], psth.shape[1], psth.shape[3])) for idx, tb in enumerate(timebins): t_cols = np.where((timebase >= (tb[0] + photodiode_delay)) & (timebase < (tb[1] + photodiode_delay)))[0] rate[idx] = np.mean(psth[:, :, t_cols, :], axis=2) 
# Shaped time bins x images x repetitions x channels assembly = xr.DataArray(rate, coords={'repetition': ('repetition', list(range(rate.shape[2]))), 'time_bin_id': ('time_bin', list(range(rate.shape[0]))), 'time_bin_start': ('time_bin', [x[0] for x in timebins]), 'time_bin_stop': ('time_bin', [x[1] for x in timebins])}, dims=['time_bin', 'image', 'repetition', 'neuroid']) # Add neuroid related meta data neuroid_meta = pd.DataFrame(json.load(open(data_dir.parent / 'array-metadata' / 'mapping.json'))) for column_name, column_data in neuroid_meta.iteritems(): assembly = assembly.assign_coords(**{f'{column_name}': ('neuroid', list(column_data.values))}) # Add stimulus related meta data for column_name, column_data in stimuli.iteritems(): assembly = assembly.assign_coords(**{f'{column_name}': ('image', list(column_data.values))}) # Collapse dimensions 'image' and 'repetitions' into a single 'presentation' dimension assembly = assembly.stack(presentation=('image', 'repetition')).reset_index('presentation') assembly = assembly.drop('image') assembly = NeuronRecordingAssembly(assembly) # Filter noisy electrodes psth = np.load(data_dir / 'solo.rsvp.things-1.normalizer_psth.npy') t_cols = np.where((timebase >= (70 + photodiode_delay)) & (timebase < (170 + photodiode_delay)))[0] rate = np.mean(psth[:, :, t_cols, :], axis=2) normalizer_assembly = xr.DataArray(rate, coords={'repetition': ('repetition', list(range(rate.shape[1]))), 'image_id': ('image', list(range(rate.shape[0]))), 'id': ('image', list(range(rate.shape[0])))}, dims=['image', 'repetition', 'neuroid']) for column_name, column_data in neuroid_meta.iteritems(): normalizer_assembly = normalizer_assembly.assign_coords( **{f'{column_name}': ('neuroid', list(column_data.values))}) normalizer_assembly = normalizer_assembly.stack(presentation=('image', 'repetition')).reset_index('presentation') normalizer_assembly = normalizer_assembly.drop('image') normalizer_assembly = normalizer_assembly.transpose('presentation', 
'neuroid') normalizer_assembly = NeuronRecordingAssembly(normalizer_assembly) filtered_assembly = filter_neuroids(normalizer_assembly, 0.7) assembly = assembly.sel(neuroid=np.isin(assembly.neuroid_id, filtered_assembly.neuroid_id)) assembly = assembly.transpose('presentation', 'neuroid', 'time_bin') # Add other experiment and data processing related info assembly.attrs['image_size_degree'] = 8 assembly.attrs['stim_on_time_ms'] = 100 return assembly def main(): data_dir = Path(__file__).parents[6] / 'data2' / 'active' / 'users' / 'sachis' assert os.path.isdir(data_dir) stimuli = collect_stimuli(data_dir) stimuli.identifier = 'dicarlo.THINGS1' assembly = load_responses(data_dir, stimuli) assembly.name = 'dicarlo.SanghaviMurty2020THINGS1' print('Packaging stimuli') package_stimulus_set(stimuli, stimulus_set_identifier=stimuli.identifier, bucket_name='brainio.dicarlo') print('Packaging assembly') package_data_assembly(assembly, assembly_identifier=assembly.name, stimulus_set_identifier=stimuli.identifier, bucket_name='brainio.dicarlo') return if __name__ == '__main__': main()
48.621849
125
0.667128
import os from pathlib import Path import json import numpy as np import xarray as xr import pandas as pd from brainio_base.assemblies import NeuronRecordingAssembly from brainio_base.stimuli import StimulusSet from brainio_collection.packaging import package_data_assembly, package_stimulus_set from mkgu_packaging.dicarlo.sanghavi import filter_neuroids def collect_stimuli(data_dir): image_dir = data_dir / 'images' / 'things-1' assert os.path.isdir(image_dir) files = sorted(os.listdir(image_dir), key=lambda x: int(os.path.splitext(x)[0])) files = files[:-130] assert os.path.isdir(data_dir / 'image-metadata') stimuli = pd.read_csv(data_dir / 'image-metadata' / 'things_1_metadata.csv') stimuli = stimuli.rename(columns={'id': 'image_id'}) stimuli['image_current_local_file_path'] = stimuli.apply( lambda row: os.path.join(image_dir, str(row.image_id) + '.jpg'), axis=1) assert len(np.unique(stimuli['image_id'])) == len(stimuli) stimuli = StimulusSet(stimuli) stimuli.image_paths = \ {stimuli.at[idx, 'image_id']: stimuli.at[idx, 'image_current_local_file_path'] for idx in range(len(stimuli))} return stimuli def load_responses(data_dir, stimuli): data_dir = data_dir / 'database' assert os.path.isdir(data_dir) psth = np.load(data_dir / 'solo.rsvp.things-1.experiment_psth.npy') timebins = [[70, 170], [170, 270], [50, 100], [100, 150], [150, 200], [200, 250], [70, 270]] photodiode_delay = 30 timebase = np.arange(-100, 381, 10) assert len(timebase) == psth.shape[2] rate = np.empty((len(timebins), psth.shape[0], psth.shape[1], psth.shape[3])) for idx, tb in enumerate(timebins): t_cols = np.where((timebase >= (tb[0] + photodiode_delay)) & (timebase < (tb[1] + photodiode_delay)))[0] rate[idx] = np.mean(psth[:, :, t_cols, :], axis=2) assembly = xr.DataArray(rate, coords={'repetition': ('repetition', list(range(rate.shape[2]))), 'time_bin_id': ('time_bin', list(range(rate.shape[0]))), 'time_bin_start': ('time_bin', [x[0] for x in timebins]), 'time_bin_stop': ('time_bin', [x[1] for x 
in timebins])}, dims=['time_bin', 'image', 'repetition', 'neuroid']) neuroid_meta = pd.DataFrame(json.load(open(data_dir.parent / 'array-metadata' / 'mapping.json'))) for column_name, column_data in neuroid_meta.iteritems(): assembly = assembly.assign_coords(**{f'{column_name}': ('neuroid', list(column_data.values))}) for column_name, column_data in stimuli.iteritems(): assembly = assembly.assign_coords(**{f'{column_name}': ('image', list(column_data.values))}) assembly = assembly.stack(presentation=('image', 'repetition')).reset_index('presentation') assembly = assembly.drop('image') assembly = NeuronRecordingAssembly(assembly) psth = np.load(data_dir / 'solo.rsvp.things-1.normalizer_psth.npy') t_cols = np.where((timebase >= (70 + photodiode_delay)) & (timebase < (170 + photodiode_delay)))[0] rate = np.mean(psth[:, :, t_cols, :], axis=2) normalizer_assembly = xr.DataArray(rate, coords={'repetition': ('repetition', list(range(rate.shape[1]))), 'image_id': ('image', list(range(rate.shape[0]))), 'id': ('image', list(range(rate.shape[0])))}, dims=['image', 'repetition', 'neuroid']) for column_name, column_data in neuroid_meta.iteritems(): normalizer_assembly = normalizer_assembly.assign_coords( **{f'{column_name}': ('neuroid', list(column_data.values))}) normalizer_assembly = normalizer_assembly.stack(presentation=('image', 'repetition')).reset_index('presentation') normalizer_assembly = normalizer_assembly.drop('image') normalizer_assembly = normalizer_assembly.transpose('presentation', 'neuroid') normalizer_assembly = NeuronRecordingAssembly(normalizer_assembly) filtered_assembly = filter_neuroids(normalizer_assembly, 0.7) assembly = assembly.sel(neuroid=np.isin(assembly.neuroid_id, filtered_assembly.neuroid_id)) assembly = assembly.transpose('presentation', 'neuroid', 'time_bin') assembly.attrs['image_size_degree'] = 8 assembly.attrs['stim_on_time_ms'] = 100 return assembly def main(): data_dir = Path(__file__).parents[6] / 'data2' / 'active' / 'users' / 'sachis' 
assert os.path.isdir(data_dir) stimuli = collect_stimuli(data_dir) stimuli.identifier = 'dicarlo.THINGS1' assembly = load_responses(data_dir, stimuli) assembly.name = 'dicarlo.SanghaviMurty2020THINGS1' print('Packaging stimuli') package_stimulus_set(stimuli, stimulus_set_identifier=stimuli.identifier, bucket_name='brainio.dicarlo') print('Packaging assembly') package_data_assembly(assembly, assembly_identifier=assembly.name, stimulus_set_identifier=stimuli.identifier, bucket_name='brainio.dicarlo') return if __name__ == '__main__': main()
true
true
1c45d15d423872579297a22d537eec56230d8c1c
197
py
Python
Aprendendo Python/cursopythonudamy/aula16while_contador_acumulador.py
JlucasS777/Aprendendo-Python
a3a960260070f0d604c27fbbc41578a6ab11edb5
[ "MIT" ]
null
null
null
Aprendendo Python/cursopythonudamy/aula16while_contador_acumulador.py
JlucasS777/Aprendendo-Python
a3a960260070f0d604c27fbbc41578a6ab11edb5
[ "MIT" ]
null
null
null
Aprendendo Python/cursopythonudamy/aula16while_contador_acumulador.py
JlucasS777/Aprendendo-Python
a3a960260070f0d604c27fbbc41578a6ab11edb5
[ "MIT" ]
null
null
null
contador = 1 acumulador = 1 while contador <= 10: print(contador,acumulador) if contador>5: break contador+=1 else : print("Cheguei ao final do programa e usei o laço else")
21.888889
60
0.659898
contador = 1 acumulador = 1 while contador <= 10: print(contador,acumulador) if contador>5: break contador+=1 else : print("Cheguei ao final do programa e usei o laço else")
true
true
1c45d36bee4e9d54b5cbf2aeacd682f4ac03ae3c
1,228
py
Python
scipy/special/_precompute/utils.py
smola/scipy
ff8b9d9e87a585a820846d7f459d6156ba621c4d
[ "BSD-3-Clause" ]
1
2020-02-26T12:15:51.000Z
2020-02-26T12:15:51.000Z
scipy/special/_precompute/utils.py
smola/scipy
ff8b9d9e87a585a820846d7f459d6156ba621c4d
[ "BSD-3-Clause" ]
null
null
null
scipy/special/_precompute/utils.py
smola/scipy
ff8b9d9e87a585a820846d7f459d6156ba621c4d
[ "BSD-3-Clause" ]
null
null
null
from __future__ import division, print_function, absolute_import from numpy.testing import suppress_warnings try: import mpmath as mp except ImportError: pass try: # Can remove when sympy #11255 is resolved; see # https://github.com/sympy/sympy/issues/11255 with suppress_warnings() as sup: sup.filter(DeprecationWarning, "inspect.getargspec.. is deprecated") from sympy.abc import x except ImportError: pass def lagrange_inversion(a): """Given a series f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1), use the Lagrange inversion formula to compute a series g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1) so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so necessarily b[0] = 0 too. The algorithm is naive and could be improved, but speed isn't an issue here and it's easy to read. """ n = len(a) f = sum(a[i]*x**i for i in range(len(a))) h = (x/f).series(x, 0, n).removeO() hpower = [h**0] for k in range(n): hpower.append((hpower[-1]*h).expand()) b = [mp.mpf(0)] for k in range(1, n): b.append(hpower[k].coeff(x, k - 1)/k) b = map(lambda x: mp.mpf(x), b) return b
26.12766
76
0.593648
from __future__ import division, print_function, absolute_import from numpy.testing import suppress_warnings try: import mpmath as mp except ImportError: pass try: _warnings() as sup: sup.filter(DeprecationWarning, "inspect.getargspec.. is deprecated") from sympy.abc import x except ImportError: pass def lagrange_inversion(a): n = len(a) f = sum(a[i]*x**i for i in range(len(a))) h = (x/f).series(x, 0, n).removeO() hpower = [h**0] for k in range(n): hpower.append((hpower[-1]*h).expand()) b = [mp.mpf(0)] for k in range(1, n): b.append(hpower[k].coeff(x, k - 1)/k) b = map(lambda x: mp.mpf(x), b) return b
true
true
1c45d3b6ea710c0f740d2889df7c9d12df1dfe29
405
py
Python
minesweeperapi/minesweeperapi/wsgi.py
Olaussen/minesweeper-online-api
36ba250b65b19cc4f0d8be36b3f84faf3f692035
[ "MIT" ]
4
2020-04-15T18:21:36.000Z
2020-04-24T12:24:03.000Z
minesweeperapi/minesweeperapi/wsgi.py
Olaussen/minesweeper-online-api
36ba250b65b19cc4f0d8be36b3f84faf3f692035
[ "MIT" ]
4
2021-03-29T23:56:40.000Z
2021-09-22T19:00:36.000Z
minesweeperapi/minesweeperapi/wsgi.py
Angstboksen/minesweeper-online-api
36ba250b65b19cc4f0d8be36b3f84faf3f692035
[ "MIT" ]
null
null
null
""" WSGI config for minesweeperapi project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'minesweeperapi.settings') application = get_wsgi_application()
23.823529
78
0.792593
import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'minesweeperapi.settings') application = get_wsgi_application()
true
true
1c45d4943574fa4c951c551453898c091153b659
1,917
py
Python
trax/rl/__init__.py
koz4k2/trax
548f671fa3804cb86154ac504fb0c6c4269b42c7
[ "Apache-2.0" ]
2
2020-02-05T09:27:29.000Z
2020-02-05T09:27:49.000Z
trax/rl/__init__.py
koz4k2/trax
548f671fa3804cb86154ac504fb0c6c4269b42c7
[ "Apache-2.0" ]
null
null
null
trax/rl/__init__.py
koz4k2/trax
548f671fa3804cb86154ac504fb0c6c4269b42c7
[ "Apache-2.0" ]
1
2021-07-08T16:35:30.000Z
2021-07-08T16:35:30.000Z
# coding=utf-8 # Copyright 2019 The Trax Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Trax RL library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gin from trax.rl import simulated_env_problem def configure_rl(*args, **kwargs): kwargs['module'] = 'trax.rl' return gin.external_configurable(*args, **kwargs) def configure_simulated_env_problem(*args, **kwargs): kwargs['blacklist'] = [ 'batch_size', 'observation_space', 'action_space', 'reward_range', 'discrete_rewards', 'history_stream', 'output_dir'] return configure_rl(*args, **kwargs) # pylint: disable=invalid-name RawSimulatedEnvProblem = configure_simulated_env_problem( simulated_env_problem.RawSimulatedEnvProblem) SerializedSequenceSimulatedEnvProblem = configure_simulated_env_problem( simulated_env_problem.SerializedSequenceSimulatedEnvProblem) # pylint: disable=invalid-name cartpole_done_fn = configure_rl(simulated_env_problem.cartpole_done_fn) cartpole_reward_fn = configure_rl(simulated_env_problem.cartpole_reward_fn) acrobot_done_fn = configure_rl(simulated_env_problem.acrobot_done_fn) acrobot_reward_fn = configure_rl(simulated_env_problem.acrobot_reward_fn) onlinetune_done_fn = configure_rl(simulated_env_problem.onlinetune_done_fn) onlinetune_reward_fn = configure_rl(simulated_env_problem.onlinetune_reward_fn)
36.169811
79
0.806468
from __future__ import absolute_import from __future__ import division from __future__ import print_function import gin from trax.rl import simulated_env_problem def configure_rl(*args, **kwargs): kwargs['module'] = 'trax.rl' return gin.external_configurable(*args, **kwargs) def configure_simulated_env_problem(*args, **kwargs): kwargs['blacklist'] = [ 'batch_size', 'observation_space', 'action_space', 'reward_range', 'discrete_rewards', 'history_stream', 'output_dir'] return configure_rl(*args, **kwargs) RawSimulatedEnvProblem = configure_simulated_env_problem( simulated_env_problem.RawSimulatedEnvProblem) SerializedSequenceSimulatedEnvProblem = configure_simulated_env_problem( simulated_env_problem.SerializedSequenceSimulatedEnvProblem) cartpole_done_fn = configure_rl(simulated_env_problem.cartpole_done_fn) cartpole_reward_fn = configure_rl(simulated_env_problem.cartpole_reward_fn) acrobot_done_fn = configure_rl(simulated_env_problem.acrobot_done_fn) acrobot_reward_fn = configure_rl(simulated_env_problem.acrobot_reward_fn) onlinetune_done_fn = configure_rl(simulated_env_problem.onlinetune_done_fn) onlinetune_reward_fn = configure_rl(simulated_env_problem.onlinetune_reward_fn)
true
true
1c45d52d487074570c308737c78dd22714356d93
4,508
py
Python
play_with_mpv.py
davehorner/play-with-mpv
89ad8de0faf10a175fbd8cf0792706e39ed87fae
[ "Unlicense" ]
null
null
null
play_with_mpv.py
davehorner/play-with-mpv
89ad8de0faf10a175fbd8cf0792706e39ed87fae
[ "Unlicense" ]
null
null
null
play_with_mpv.py
davehorner/play-with-mpv
89ad8de0faf10a175fbd8cf0792706e39ed87fae
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python # Plays MPV when instructed to by a chrome extension =] import sys import argparse from subprocess import Popen FileNotFoundError = IOError if sys.version_info[0] < 3: # python 2 import BaseHTTPServer import urlparse class CompatibilityMixin: def send_body(self, msg): self.wfile.write(msg+'\n') self.wfile.close() else: # python 3 import http.server as BaseHTTPServer import urllib.parse as urlparse class CompatibilityMixin: def send_body(self, msg): self.wfile.write(bytes(msg+'\n', 'utf-8')) class Handler(BaseHTTPServer.BaseHTTPRequestHandler, CompatibilityMixin): def respond(self, code, body=None): self.send_response(code) self.send_header("Content-type", "text/plain") self.end_headers() if body: self.send_body(body) def do_GET(self): try: url = urlparse.urlparse(self.path) query = urlparse.parse_qs(url.query) except: query = {} if query.get('mpv_args'): print("MPV ARGS:", query.get('mpv_args')) if "play_url" in query: urls = str(query["play_url"][0]) if urls.startswith('magnet:') or urls.endswith('.torrent'): try: pipe = Popen(['peerflix', '-k', urls, '--', '--force-window'] + query.get("mpv_args", [])) except FileNotFoundError as e: missing_bin('peerflix') else: try: pipe = Popen(['mpv', urls, '--force-window'] + query.get("mpv_args", [])) except FileNotFoundError as e: missing_bin('mpv') self.respond(200, "playing...") elif "cast_url" in query: urls = str(query["cast_url"][0]) if urls.startswith('magnet:') or urls.endswith('.torrent'): print(" === WARNING: Casting torrents not yet fully supported!") try: with Popen(['mkchromecast', '--video', '--source-url', 'http://localhost:8888']): pass except FileNotFoundError as e: missing_bin('mkchromecast') pipe.terminate() else: try: pipe = Popen(['mkchromecast', '--video', '-y', urls]) except FileNotFoundError as e: missing_bin('mkchromecast') self.respond(200, "casting...") elif "fairuse_url" in query: urls = str(query["fairuse_url"][0]) location = query.get("location", ['~/Downloads/'])[0] if "%" 
not in location: location += "%(title)s.%(ext)s" print("downloading ", urls, "to", location) if urls.startswith('magnet:') or urls.endswith('.torrent'): msg = " === ERROR: Downloading torrents not yet supported!" print(msg) self.respond(400, msg) else: try: pipe = Popen(['youtube-dl', urls, '-o', location] + query.get('ytdl_args', [])) except FileNotFoundError as e: missing_bin('youtube-dl') self.respond(200, "downloading...") else: self.respond(400) def missing_bin(bin): print("======================") print("ERROR: "+bin.upper()+" does not appear to be installed correctly!") print("please ensure you can launch '"+bin+"' in the terminal.") print("======================") def start(): parser = argparse.ArgumentParser(description='Plays MPV when instructed to by a browser extension.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--port', type=int, default=7531, help='The port to listen on.') parser.add_argument('--public', action='store_true', help='Accept traffic from other comuters.') args = parser.parse_args() hostname = '0.0.0.0' if args.public else 'localhost' httpd = BaseHTTPServer.HTTPServer((hostname, args.port), Handler) print("serving on {}:{}".format(hostname, args.port)) try: httpd.serve_forever() except KeyboardInterrupt: print(" shutting down...") httpd.shutdown() if __name__ == '__main__': start()
37.566667
160
0.533718
import sys import argparse from subprocess import Popen FileNotFoundError = IOError if sys.version_info[0] < 3: import BaseHTTPServer import urlparse class CompatibilityMixin: def send_body(self, msg): self.wfile.write(msg+'\n') self.wfile.close() else: import http.server as BaseHTTPServer import urllib.parse as urlparse class CompatibilityMixin: def send_body(self, msg): self.wfile.write(bytes(msg+'\n', 'utf-8')) class Handler(BaseHTTPServer.BaseHTTPRequestHandler, CompatibilityMixin): def respond(self, code, body=None): self.send_response(code) self.send_header("Content-type", "text/plain") self.end_headers() if body: self.send_body(body) def do_GET(self): try: url = urlparse.urlparse(self.path) query = urlparse.parse_qs(url.query) except: query = {} if query.get('mpv_args'): print("MPV ARGS:", query.get('mpv_args')) if "play_url" in query: urls = str(query["play_url"][0]) if urls.startswith('magnet:') or urls.endswith('.torrent'): try: pipe = Popen(['peerflix', '-k', urls, '--', '--force-window'] + query.get("mpv_args", [])) except FileNotFoundError as e: missing_bin('peerflix') else: try: pipe = Popen(['mpv', urls, '--force-window'] + query.get("mpv_args", [])) except FileNotFoundError as e: missing_bin('mpv') self.respond(200, "playing...") elif "cast_url" in query: urls = str(query["cast_url"][0]) if urls.startswith('magnet:') or urls.endswith('.torrent'): print(" === WARNING: Casting torrents not yet fully supported!") try: with Popen(['mkchromecast', '--video', '--source-url', 'http://localhost:8888']): pass except FileNotFoundError as e: missing_bin('mkchromecast') pipe.terminate() else: try: pipe = Popen(['mkchromecast', '--video', '-y', urls]) except FileNotFoundError as e: missing_bin('mkchromecast') self.respond(200, "casting...") elif "fairuse_url" in query: urls = str(query["fairuse_url"][0]) location = query.get("location", ['~/Downloads/'])[0] if "%" not in location: location += "%(title)s.%(ext)s" print("downloading ", urls, "to", location) if 
urls.startswith('magnet:') or urls.endswith('.torrent'): msg = " === ERROR: Downloading torrents not yet supported!" print(msg) self.respond(400, msg) else: try: pipe = Popen(['youtube-dl', urls, '-o', location] + query.get('ytdl_args', [])) except FileNotFoundError as e: missing_bin('youtube-dl') self.respond(200, "downloading...") else: self.respond(400) def missing_bin(bin): print("======================") print("ERROR: "+bin.upper()+" does not appear to be installed correctly!") print("please ensure you can launch '"+bin+"' in the terminal.") print("======================") def start(): parser = argparse.ArgumentParser(description='Plays MPV when instructed to by a browser extension.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--port', type=int, default=7531, help='The port to listen on.') parser.add_argument('--public', action='store_true', help='Accept traffic from other comuters.') args = parser.parse_args() hostname = '0.0.0.0' if args.public else 'localhost' httpd = BaseHTTPServer.HTTPServer((hostname, args.port), Handler) print("serving on {}:{}".format(hostname, args.port)) try: httpd.serve_forever() except KeyboardInterrupt: print(" shutting down...") httpd.shutdown() if __name__ == '__main__': start()
true
true
1c45d5350b388a12e7757dc666f9344c210a8547
42,275
py
Python
pypy/module/_winreg/interp_winreg.py
olliemath/pypy
8b873bd0b8bf76075aba3d915c260789f26f5788
[ "Apache-2.0", "OpenSSL" ]
null
null
null
pypy/module/_winreg/interp_winreg.py
olliemath/pypy
8b873bd0b8bf76075aba3d915c260789f26f5788
[ "Apache-2.0", "OpenSSL" ]
null
null
null
pypy/module/_winreg/interp_winreg.py
olliemath/pypy
8b873bd0b8bf76075aba3d915c260789f26f5788
[ "Apache-2.0", "OpenSSL" ]
null
null
null
from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rwinreg, rwin32, rstring from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib.buffer import ByteBuffer from rpython.rlib.rutf8 import check_utf8 from pypy.interpreter.baseobjspace import W_Root, BufferInterfaceNotFound from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.unicodehelper import ( str_decode_utf_16, utf8_encode_utf_16) from pypy.module._codecs.interp_codecs import CodecState from rpython.translator.tool.cbuild import ExternalCompilationInfo # wrappers needed to call the reflection functions loaded at runtime # using WINAPI convention eci = ExternalCompilationInfo( includes=['windows.h'], post_include_bits=[ "RPY_EXTERN LONG\n" "pypy_RegChangeReflectionKey(FARPROC address, HKEY key);\n" "RPY_EXTERN LONG\n" "pypy_RegQueryReflectionKey(FARPROC address, HKEY key, LPBOOL isDisabled);\n" "RPY_EXTERN LONG\n" "pypy_RegDeleteKeyExW(FARPROC address, HKEY key, LPCWSTR subkey,\n" " REGSAM sam, DWORD reserved);\n" ], separate_module_sources=[''' LONG pypy_RegChangeReflectionKey(FARPROC address, HKEY key) { LONG (WINAPI *func)(HKEY); *(FARPROC*)&func = address; return func(key); } LONG pypy_RegQueryReflectionKey(FARPROC address, HKEY key, LPBOOL isDisabled) { LONG (WINAPI *func)(HKEY, LPBOOL); *(FARPROC*)&func = address; return func(key, isDisabled); } LONG pypy_RegDeleteKeyExW(FARPROC address, HKEY key, LPCWSTR subkey, REGSAM sam, DWORD reserved) { LONG (WINAPI *func)(HKEY, LPCWSTR, REGSAM, DWORD); *(FARPROC*)&func = address; return func(key, subkey, sam, reserved); } '''], ) pypy_RegChangeReflectionKey = rffi.llexternal( 'pypy_RegChangeReflectionKey', [rffi.VOIDP, rwinreg.HKEY], rffi.LONG, compilation_info=eci) pypy_RegQueryReflectionKey = rffi.llexternal( 'pypy_RegQueryReflectionKey', [rffi.VOIDP, 
rwinreg.HKEY, rwin32.LPBOOL], rffi.LONG, compilation_info=eci) pypy_RegDeleteKeyExW = rffi.llexternal( 'pypy_RegDeleteKeyExW', [rffi.VOIDP, rwinreg.HKEY, rffi.CWCHARP, rwinreg.REGSAM, rwin32.DWORD], rffi.LONG, compilation_info=eci) def raiseWindowsError(space, errcode, context): message = rwin32.FormatErrorW(errcode) w_errcode = space.newint(errcode) w_t = space.newtuple([w_errcode, space.newtext(*message), space.w_None, w_errcode]) raise OperationError(space.w_WindowsError, w_t) class W_HKEY(W_Root): def __init__(self, space, hkey): self.hkey = hkey self.space = space self.register_finalizer(space) def _finalize_(self): # ignore errors try: self.Close(self.space) except: pass def as_int(self): return rffi.cast(rffi.SIZE_T, self.hkey) def descr_bool(self, space): return space.newbool(self.as_int() != 0) def descr_handle_get(self, space): return space.newint(self.as_int()) def descr_repr(self, space): return space.newtext("<PyHKEY:0x%x>" % (self.as_int(),)) def descr_int(self, space): return space.newint(self.as_int()) def descr__enter__(self, space): return self def descr__exit__(self, space, __args__): CloseKey(space, self) def Close(self, space): """key.Close() - Closes the underlying Windows handle. If the handle is already closed, no error is raised.""" CloseKey(space, self) def Detach(self, space): """int = key.Detach() - Detaches the Windows handle from the handle object. The result is the value of the handle before it is detached. If the handle is already detached, this will return zero. After calling this function, the handle is effectively invalidated, but the handle is not closed. 
You would call this function when you need the underlying win32 handle to exist beyond the lifetime of the handle object.""" key = self.as_int() self.hkey = rwin32.NULL_HANDLE return space.newint(key) @unwrap_spec(key=int) def new_HKEY(space, w_subtype, key): hkey = rffi.cast(rwinreg.HKEY, key) return W_HKEY(space, hkey) descr_HKEY_new = interp2app(new_HKEY) W_HKEY.typedef = TypeDef( "winreg.HKEYType", __doc__="""\ PyHKEY Object - A Python object, representing a win32 registry key. This object wraps a Windows HKEY object, automatically closing it when the object is destroyed. To guarantee cleanup, you can call either the Close() method on the PyHKEY, or the CloseKey() method. All functions which accept a handle object also accept an integer - however, use of the handle object is encouraged. Functions: Close() - Closes the underlying handle. Detach() - Returns the integer Win32 handle, detaching it from the object Properties: handle - The integer Win32 handle. Operations: __bool__ - Handles with an open object return true, otherwise false. __int__ - Converting a handle to an integer returns the Win32 handle. 
__cmp__ - Handle objects are compared using the handle value.""", __new__=descr_HKEY_new, __repr__=interp2app(W_HKEY.descr_repr), __int__=interp2app(W_HKEY.descr_int), __bool__=interp2app(W_HKEY.descr_bool), __enter__=interp2app(W_HKEY.descr__enter__), __exit__=interp2app(W_HKEY.descr__exit__), handle=GetSetProperty(W_HKEY.descr_handle_get), Close=interp2app(W_HKEY.Close), Detach=interp2app(W_HKEY.Detach), ) def hkey_w(w_hkey, space): if space.is_w(w_hkey, space.w_None): raise oefmt(space.w_TypeError, "None is not a valid HKEY in this context") elif isinstance(w_hkey, W_HKEY): return w_hkey.hkey elif space.isinstance_w(w_hkey, space.w_int): if space.is_true(space.lt(w_hkey, space.newint(0))): return rffi.cast(rwinreg.HKEY, space.int_w(w_hkey)) return rffi.cast(rwinreg.HKEY, space.uint_w(w_hkey)) else: raise oefmt(space.w_TypeError, "The object is not a PyHKEY object") def CloseKey(space, w_hkey): """CloseKey(hkey) - Closes a previously opened registry key. The hkey argument specifies a previously opened key. Note that if the key is not closed using this method, it will be closed when the hkey object is destroyed by Python.""" hkey = hkey_w(w_hkey, space) if hkey: ret = rwinreg.RegCloseKey(hkey) if ret != 0: raiseWindowsError(space, ret, 'RegCloseKey') if isinstance(w_hkey, W_HKEY): space.interp_w(W_HKEY, w_hkey).hkey = rwin32.NULL_HANDLE def FlushKey(space, w_hkey): """FlushKey(key) - Writes all the attributes of a key to the registry. key is an already open key, or any one of the predefined HKEY_* constants. It is not necessary to call RegFlushKey to change a key. Registry changes are flushed to disk by the registry using its lazy flusher. Registry changes are also flushed to disk at system shutdown. Unlike CloseKey(), the FlushKey() method returns only when all the data has been written to the registry. An application should only call FlushKey() if it requires absolute certainty that registry changes are on disk. 
If you don't know whether a FlushKey() call is required, it probably isn't.""" hkey = hkey_w(w_hkey, space) if hkey: ret = rwinreg.RegFlushKey(hkey) if ret != 0: raiseWindowsError(space, ret, 'RegFlushKey') @unwrap_spec(subkey="unicode", filename="unicode") def LoadKey(space, w_hkey, subkey, filename): """LoadKey(key, sub_key, file_name) - Creates a subkey under the specified key and stores registration information from a specified file into that subkey. key is an already open key, or any one of the predefined HKEY_* constants. sub_key is a string that identifies the sub_key to load file_name is the name of the file to load registry data from. This file must have been created with the SaveKey() function. Under the file allocation table (FAT) file system, the filename may not have an extension. A call to LoadKey() fails if the calling process does not have the SE_RESTORE_PRIVILEGE privilege. If key is a handle returned by ConnectRegistry(), then the path specified in fileName is relative to the remote computer. The docs imply key must be in the HKEY_USER or HKEY_LOCAL_MACHINE tree""" # XXX should filename use space.fsencode_w? hkey = hkey_w(w_hkey, space) with rffi.scoped_unicode2wcharp(subkey) as wide_subkey: c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey) with rffi.scoped_unicode2wcharp(filename) as wide_filename: c_filename = rffi.cast(rffi.CWCHARP, wide_filename) ret = rwinreg.RegLoadKeyW(hkey, c_subkey, c_filename) if ret != 0: raiseWindowsError(space, ret, 'RegLoadKey') @unwrap_spec(filename="unicode") def SaveKey(space, w_hkey, filename): """ SaveKey(key, file_name) - Saves the specified key, and all its subkeys to the specified file. key is an already open key, or any one of the predefined HKEY_* constants. file_name is the name of the file to save registry data to. This file cannot already exist. If this filename includes an extension, it cannot be used on file allocation table (FAT) file systems by the LoadKey(), ReplaceKey() or RestoreKey() methods. 
If key represents a key on a remote computer, the path described by file_name is relative to the remote computer. The caller of this method must possess the SeBackupPrivilege security privilege. This function passes NULL for security_attributes to the API.""" hkey = hkey_w(w_hkey, space) with rffi.scoped_unicode2wcharp(filename) as wide_filename: c_filename = rffi.cast(rffi.CWCHARP, wide_filename) ret = rwinreg.RegSaveKeyW(hkey, c_filename, None) if ret != 0: raiseWindowsError(space, ret, 'RegSaveKey') @unwrap_spec(typ=int) def SetValue(space, w_hkey, w_subkey, typ, w_value): """ SetValue(key, sub_key, type, value) - Associates a value with a specified key. key is an already open key, or any one of the predefined HKEY_* constants. sub_key is a string that names the subkey with which the value is associated. type is an integer that specifies the type of the data. Currently this must be REG_SZ, meaning only strings are supported. value is a string that specifies the new value. If the key specified by the sub_key parameter does not exist, the SetValue function creates it. Value lengths are limited by available memory. Long values (more than 2048 bytes) should be stored as files with the filenames stored in the configuration registry. This helps the registry perform efficiently. 
The key identified by the key parameter must have been opened with KEY_SET_VALUE access.""" if typ != rwinreg.REG_SZ: raise oefmt(space.w_ValueError, "Type must be winreg.REG_SZ") hkey = hkey_w(w_hkey, space) state = space.fromcache(CodecState) errh = state.encode_error_handler utf8 = space.utf8_w(w_subkey) subkeyW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh, allow_surrogates=True) utf8 = space.utf8_w(w_value) valueW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh, allow_surrogates=True) valueL = space.len_w(w_value) # Add an offset to remove the BOM from the native utf16 wstr with rffi.scoped_nonmovingbuffer(subkeyW) as subkeyP0: subkeyP = rffi.cast(rffi.CWCHARP, rffi.ptradd(subkeyP0, 2)) with rffi.scoped_nonmovingbuffer(valueW) as valueP0: valueP = rffi.cast(rffi.CWCHARP, rffi.ptradd(valueP0, 2)) ret = rwinreg.RegSetValueW(hkey, subkeyP, rwinreg.REG_SZ, valueP, valueL) if ret != 0: raiseWindowsError(space, ret, 'RegSetValue') def QueryValue(space, w_hkey, w_subkey): """ string = QueryValue(key, sub_key) - retrieves the unnamed value for a key. key is an already open key, or any one of the predefined HKEY_* constants. sub_key is a string that holds the name of the subkey with which the value is associated. If this parameter is None or empty, the function retrieves the value set by the SetValue() method for the key identified by key. Values in the registry have name, type, and data components. This method retrieves the data for a key's first value that has a NULL name. 
But the underlying API call doesn't return the type: Lame, DONT USE THIS!!!""" hkey = hkey_w(w_hkey, space) if space.is_w(w_subkey, space.w_None): subkey = None else: subkey = space.utf8_w(w_subkey).decode('utf8') with rffi.scoped_unicode2wcharp(subkey) as wide_subkey: c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey) with lltype.scoped_alloc(rwin32.PLONG.TO, 1) as bufsize_p: bufsize_p[0] = rffi.cast(rwin32.LONG, 0) ret = rwinreg.RegQueryValueW(hkey, c_subkey, None, bufsize_p) bufSize = intmask(bufsize_p[0]) if ret == rwinreg.ERROR_MORE_DATA: bufSize = 256 elif ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') while True: buf = ByteBuffer(bufSize) bufP = rffi.cast(rffi.CWCHARP, buf.get_raw_address()) ret = rwinreg.RegQueryValueW(hkey, c_subkey, bufP, bufsize_p) if ret == rwinreg.ERROR_MORE_DATA: # Resize and retry bufSize *= 2 bufsize_p[0] = rffi.cast(rwin32.LONG, bufSize) continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') utf8, lgt = wbuf_to_utf8(space, buf[0:intmask(bufsize_p[0])]) return space.newtext(utf8, lgt) def convert_to_regdata(space, w_value, typ): '''returns CCHARP, int''' buf = None if typ == rwinreg.REG_DWORD: if space.is_none(w_value) or space.isinstance_w(w_value, space.w_int): if space.is_none(w_value): value = r_uint(0) else: value = space.c_uint_w(w_value) buflen = rffi.sizeof(rwin32.DWORD) buf1 = lltype.malloc(rffi.CArray(rwin32.DWORD), 1, flavor='raw') buf1[0] = rffi.cast(rffi.UINT, value) buf = rffi.cast(rffi.CCHARP, buf1) elif typ == rwinreg.REG_QWORD: if space.is_none(w_value) or space.isinstance_w(w_value, space.w_int): if space.is_none(w_value): value = r_ulonglong(0) else: value = space.r_ulonglong_w(w_value) buflen = rffi.sizeof(rffi.ULONGLONG) buf1 = lltype.malloc(rffi.CArray(rffi.ULONGLONG), 1, flavor='raw') buf1[0] = rffi.cast(rffi.ULONGLONG, value) buf = rffi.cast(rffi.CCHARP, buf1) elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: if space.is_w(w_value, space.w_None): buflen = 1 buf = 
lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') buf[0] = '\0' else: buf = rffi.unicode2wcharp(space.utf8_w(w_value).decode('utf8')) buf = rffi.cast(rffi.CCHARP, buf) buflen = (space.len_w(w_value) * 2) + 1 elif typ == rwinreg.REG_MULTI_SZ: if space.is_w(w_value, space.w_None): buflen = 1 buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') buf[0] = '\0' elif space.isinstance_w(w_value, space.w_list): strings = [] buflen = 0 # unwrap strings and compute total size w_iter = space.iter(w_value) while True: try: w_item = space.next(w_iter) item = space.utf8_w(w_item).decode('utf8') strings.append(item) buflen += 2 * (len(item) + 1) except OperationError as e: if not e.match(space, space.w_StopIteration): raise # re-raise other app-level exceptions break buflen += 2 buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') # Now copy data buflen = 0 for string in strings: with rffi.scoped_unicode2wcharp(string) as wchr: c_str = rffi.cast(rffi.CCHARP, wchr) for i in range(len(string) * 2): buf[buflen + i] = c_str[i] buflen += (len(string) + 1) * 2 buf[buflen - 1] = '\0' buf[buflen - 2] = '\0' buflen += 2 buf[buflen - 1] = '\0' buf[buflen - 2] = '\0' else: # REG_BINARY and ALL unknown data types. 
if space.is_w(w_value, space.w_None): buflen = 0 buf = lltype.nullptr(rffi.CCHARP.TO) else: try: value = w_value.buffer_w(space, space.BUF_SIMPLE) except BufferInterfaceNotFound: raise oefmt(space.w_TypeError, "Objects of type '%T' can not be used as binary " "registry values", w_value) else: value = value.as_str() buflen = len(value) buf = rffi.str2charp(value) if buf is not None: return rffi.cast(rffi.CWCHARP, buf), buflen raise oefmt(space.w_ValueError, "Could not convert the data to the specified type") def wbuf_to_utf8(space, wbuf): state = space.fromcache(CodecState) errh = state.decode_error_handler utf8, lgt, pos = str_decode_utf_16(wbuf, 'surrogatepass', final=True, errorhandler=errh) if len(utf8) > 1 and utf8[len(utf8) - 1] == '\x00': # trim off one trailing '\x00' newlen = len(utf8) - 1 assert newlen >=0 utf8 = utf8[0:newlen] lgt -= 1 return utf8, lgt def convert_from_regdata(space, buf, buflen, typ): if typ == rwinreg.REG_DWORD: if not buflen: return space.newint(0) d = rffi.cast(rwin32.LPDWORD, buf.get_raw_address())[0] return space.newint(d) elif typ == rwinreg.REG_QWORD: if not buflen: return space.newint(0) d = rffi.cast(rffi.ULONGLONGP, buf.get_raw_address())[0] return space.newint(d) elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: if not buflen: return space.newtext('', 0) even = (buflen // 2) * 2 utf8, lgt = wbuf_to_utf8(space, buf[0:even]) # bpo-25778, truncate at first NULL to match reg.exe behaviour. 
i = 0 utf8len = len(utf8) while i < utf8len: if utf8[i] == '\x00': utf8 = utf8[0:i] lgt = check_utf8(utf8, True) break i += 1 w_s = space.newtext(utf8, lgt) return w_s elif typ == rwinreg.REG_MULTI_SZ: if not buflen: return space.newlist([]) even = (buflen // 2) * 2 utf8, lgt = wbuf_to_utf8(space, buf[0:even]) parts = rstring.split(utf8, '\0') partslen = len(parts) if partslen > 0 and parts[partslen-1] == '': partslen -= 1 ret = [] i = 0 while i < partslen: lgt = check_utf8(parts[i], True) ret.append(space.newtext(parts[i], lgt)) i += 1 return space.newlist(ret) else: # REG_BINARY and all other types if buflen == 0: return space.w_None else: return space.newbytes(buf[0:buflen]) @unwrap_spec(value_name="unicode", typ=int) def SetValueEx(space, w_hkey, value_name, w_reserved, typ, w_value): """ SetValueEx(key, value_name, reserved, type, value) - Stores data in the value field of an open registry key. key is an already open key, or any one of the predefined HKEY_* constants. value_name is a string containing the name of the value to set, or None type is an integer that specifies the type of the data. This should be one of: REG_BINARY -- Binary data in any form. REG_DWORD -- A 32-bit number. REG_DWORD_LITTLE_ENDIAN -- A 32-bit number in little-endian format. REG_DWORD_BIG_ENDIAN -- A 32-bit number in big-endian format. REG_QWORD -- A 64-bit number. REG_QWORD_LITTLE_ENDIAN -- A 64-bit number in little-endian format. REG_EXPAND_SZ -- A null-terminated string that contains unexpanded references to environment variables (for example, %PATH%). REG_LINK -- A Unicode symbolic link. REG_MULTI_SZ -- An sequence of null-terminated strings, terminated by two null characters. Note that Python handles this termination automatically. REG_NONE -- No defined value type. REG_RESOURCE_LIST -- A device-driver resource list. REG_SZ -- A null-terminated string. reserved can be anything - zero is always passed to the API. value is a string that specifies the new value. 
This method can also set additional value and type information for the
specified key.  The key identified by the key parameter must have been
opened with KEY_SET_VALUE access.

To open the key, use the CreateKeyEx() or OpenKeyEx() methods.

Value lengths are limited by available memory. Long values (more than
2048 bytes) should be stored as files with the filenames stored in
the configuration registry.  This helps the registry perform efficiently."""
    hkey = hkey_w(w_hkey, space)
    # convert_to_regdata hands us a raw buffer we own; free it in the
    # finally below even if RegSetValueExW raises.
    buf, buflen = convert_to_regdata(space, w_value, typ)
    try:
        with rffi.scoped_unicode2wcharp(value_name) as wide_vn:
            c_vn = rffi.cast(rffi.CWCHARP, wide_vn)
            ret = rwinreg.RegSetValueExW(hkey, c_vn, 0, typ, buf, buflen)
    finally:
        if buf != lltype.nullptr(rffi.CWCHARP.TO):
            lltype.free(buf, flavor='raw')
    if ret != 0:
        raiseWindowsError(space, ret, 'RegSetValueEx')


def QueryValueEx(space, w_hkey, w_subkey):
    """value,type_id = QueryValueEx(key, value_name) - Retrieves the type
and data for a specified value name associated with an open registry key.

key is an already open key, or any one of the predefined HKEY_* constants.
value_name is a string indicating the value to query"""
    hkey = hkey_w(w_hkey, space)
    if space.is_w(w_subkey, space.w_None):
        subkey = None
    else:
        subkey = space.utf8_w(w_subkey).decode('utf8')
    null_dword = lltype.nullptr(rwin32.LPDWORD.TO)
    with rffi.scoped_unicode2wcharp(subkey) as wide_subkey:
        c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey)
        with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as dataSize:
            # Probe with a NULL data buffer to learn the required size
            # (returned through the in/out dataSize parameter).
            ret = rwinreg.RegQueryValueExW(hkey, c_subkey, null_dword,
                                           null_dword, None, dataSize)
            bufSize = intmask(dataSize[0])
            if ret == rwinreg.ERROR_MORE_DATA:
                # Copy CPython behaviour, otherwise bufSize can be 0
                bufSize = 256
            elif ret != 0:
                raiseWindowsError(space, ret, 'RegQueryValue')
            while True:
                dataBuf = ByteBuffer(bufSize)
                dataBufP = rffi.cast(rffi.CWCHARP, dataBuf.get_raw_address())
                with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as retType:
                    ret = rwinreg.RegQueryValueExW(hkey, c_subkey,
                                                   null_dword, retType,
                                                   dataBufP, dataSize)
                    if ret == rwinreg.ERROR_MORE_DATA:
                        # Resize and retry
                        bufSize *= 2
                        dataSize[0] = rffi.cast(rwin32.DWORD, bufSize)
                        continue
                    if ret != 0:
                        raiseWindowsError(space, ret, 'RegQueryValueEx')
                    length = intmask(dataSize[0])
                    ret_type = intmask(retType[0])
                    return space.newtuple([
                        convert_from_regdata(space, dataBuf,
                                             length, ret_type),
                        space.newint(intmask(ret_type)),
                    ])


@unwrap_spec(subkey="unicode")
def CreateKey(space, w_hkey, subkey):
    """key = CreateKey(key, sub_key) - Creates or opens the specified key.

key is an already open key, or one of the predefined HKEY_* constants
sub_key is a string that names the key this method opens or creates.
If key is one of the predefined keys, sub_key may be None. In that case,
the handle returned is the same key handle passed in to the function.

If the key already exists, this function opens the existing key

The return value is the handle of the opened key.
If the function fails, an exception is raised."""
    hkey = hkey_w(w_hkey, space)
    with rffi.scoped_unicode2wcharp(subkey) as wide_subkey:
        c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey)
        with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
            ret = rwinreg.RegCreateKeyW(hkey, c_subkey, rethkey)
            if ret != 0:
                raiseWindowsError(space, ret, 'CreateKey')
            return W_HKEY(space, rethkey[0])


@unwrap_spec(sub_key="unicode", reserved=int, access=r_uint)
def CreateKeyEx(space, w_key, sub_key, reserved=0, access=rwinreg.KEY_WRITE):
    """key = CreateKeyEx(key, sub_key) - Creates or opens the specified key.

key is an already open key, or one of the predefined HKEY_* constants
sub_key is a string that names the key this method opens or creates.
If key is one of the predefined keys, sub_key may be None. In that case,
the handle returned is the same key handle passed in to the function.

If the key already exists, this function opens the existing key

The return value is the handle of the opened key.
If the function fails, an exception is raised."""
    hkey = hkey_w(w_key, space)
    with rffi.scoped_unicode2wcharp(sub_key) as wide_sub_key:
        c_subkey = rffi.cast(rffi.CWCHARP, wide_sub_key)
        with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
            ret = rwinreg.RegCreateKeyExW(hkey, c_subkey, reserved, None, 0,
                                          access, None, rethkey,
                                          lltype.nullptr(rwin32.LPDWORD.TO))
            if ret != 0:
                raiseWindowsError(space, ret, 'CreateKeyEx')
            return W_HKEY(space, rethkey[0])


@unwrap_spec(subkey="unicode")
def DeleteKey(space, w_hkey, subkey):
    """DeleteKey(key, subkey) - Deletes the specified key.

key is an already open key, or any one of the predefined HKEY_* constants.
sub_key is a string that must be a subkey of the key identified by the key
parameter.  This value must not be None, and the key may not have subkeys.

This method can not delete keys with subkeys.

If the method succeeds, the entire key, including all of its values,
is removed.  If the method fails, an EnvironmentError exception is raised."""
    hkey = hkey_w(w_hkey, space)
    with rffi.scoped_unicode2wcharp(subkey) as wide_subkey:
        c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey)
        ret = rwinreg.RegDeleteKeyW(hkey, c_subkey)
        if ret != 0:
            raiseWindowsError(space, ret, 'RegDeleteKey')


@unwrap_spec(subkey="unicode")
def DeleteValue(space, w_hkey, subkey):
    """DeleteValue(key, value) - Removes a named value from a registry key.

key is an already open key, or any one of the predefined HKEY_* constants.
value is a string that identifies the value to remove."""
    hkey = hkey_w(w_hkey, space)
    with rffi.scoped_unicode2wcharp(subkey) as wide_subkey:
        c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey)
        ret = rwinreg.RegDeleteValueW(hkey, c_subkey)
        if ret != 0:
            raiseWindowsError(space, ret, 'RegDeleteValue')


@unwrap_spec(reserved=int, access=r_uint)
def OpenKey(space, w_key, w_sub_key, reserved=0, access=rwinreg.KEY_READ):
    """key = OpenKey(key, sub_key, res = 0, sam = KEY_READ) - Opens the specified key.

key is an already open key, or any one of the predefined HKEY_* constants.
sub_key is a string that identifies the sub_key to open
res is a reserved integer, and must be zero.  Default is zero.
sam is an integer that specifies an access mask that describes the desired
security access for the key.  Default is KEY_READ

The result is a new handle to the specified key
If the function fails, an EnvironmentError exception is raised."""
    hkey = hkey_w(w_key, space)
    utf8 = space.utf8_w(w_sub_key)
    state = space.fromcache(CodecState)
    errh = state.encode_error_handler
    # Encode to UTF-16 with an explicit NUL terminator appended.
    subkeyW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh,
                                 allow_surrogates=True)
    with rffi.scoped_nonmovingbuffer(subkeyW) as subkeyP0:
        # ptradd(..., 2) skips the first 2 bytes — presumably the BOM
        # emitted by utf8_encode_utf_16 (same pattern in ConnectRegistry).
        subkeyP = rffi.cast(rffi.CWCHARP, rffi.ptradd(subkeyP0, 2))
        with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
            ret = rwinreg.RegOpenKeyExW(hkey, subkeyP, reserved, access,
                                        rethkey)
            if ret != 0:
                raiseWindowsError(space, ret, 'RegOpenKeyEx')
            return W_HKEY(space, rethkey[0])


@unwrap_spec(index=int)
def EnumValue(space, w_hkey, index):
    """tuple = EnumValue(key, index) - Enumerates values of an open registry key.

key is an already open key, or any one of the predefined HKEY_* constants.
index is an integer that identifies the index of the value to retrieve.

The function retrieves the name of one subkey each time it is called.
It is typically called repeatedly, until an EnvironmentError exception
is raised, indicating no more values.

The result is a tuple of 3 items:
value_name is a string that identifies the value.
value_data is an object that holds the value data, and whose type depends
on the underlying registry type.
data_type is an integer that identifies the type of the value data."""
    hkey = hkey_w(w_hkey, space)
    null_dword = lltype.nullptr(rwin32.LPDWORD.TO)
    with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as valueSize:
        with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as dataSize:
            # Ask the key for its longest value-name / value-data sizes so
            # the buffers below can usually be allocated right first time.
            ret = rwinreg.RegQueryInfoKeyW(
                hkey, None, null_dword, null_dword,
                null_dword, null_dword, null_dword,
                null_dword, valueSize, dataSize,
                null_dword, lltype.nullptr(rwin32.PFILETIME.TO))
            if ret != 0:
                raiseWindowsError(space, ret, 'RegQueryInfoKey')
            # include null terminators
            valueSize[0] = rffi.cast(rwin32.DWORD, intmask(valueSize[0]) + 1)
            dataSize[0] = rffi.cast(rwin32.DWORD, intmask(dataSize[0]) + 1)
            bufDataSize = intmask(dataSize[0])
            # value-name size is in characters; buffer is in bytes (UTF-16)
            bufValueSize = intmask(valueSize[0]) * 2
            valueBuf = ByteBuffer(bufValueSize)
            valueBufP = rffi.cast(rffi.CWCHARP, valueBuf.get_raw_address())
            while True:
                dataBuf = ByteBuffer(bufDataSize)
                dataBufP = rffi.cast(rffi.CCHARP, dataBuf.get_raw_address())
                with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as retType:
                    ret = rwinreg.RegEnumValueW(
                        hkey, index, valueBufP, valueSize,
                        null_dword, retType, dataBufP, dataSize)
                    if ret == rwinreg.ERROR_MORE_DATA:
                        # Resize and retry.  For dynamic keys, the value of
                        # dataSize[0] is useless (always 1) so do what CPython
                        # does, except they use 2 instead of 4
                        bufDataSize *= 4
                        dataSize[0] = rffi.cast(rwin32.DWORD, bufDataSize)
                        valueSize[0] = rffi.cast(rwin32.DWORD, bufValueSize)
                        continue
                    if ret != 0:
                        raiseWindowsError(space, ret, 'RegEnumValue')
                    length = intmask(dataSize[0])
                    vlen = (intmask(valueSize[0]) + 1) * 2
                    utf8v, lenv = wbuf_to_utf8(space, valueBuf[0:vlen])
                    ret_type = intmask(retType[0])
                    return space.newtuple([
                        space.newtext(utf8v, lenv),
                        convert_from_regdata(space, dataBuf,
                                             length, ret_type),
                        space.newint(ret_type),
                    ])


@unwrap_spec(index=int)
def EnumKey(space, w_hkey, index):
    """string = EnumKey(key, index) - Enumerates subkeys of an open registry key.

key is an already open key, or any one of the predefined HKEY_* constants.
index is an integer that identifies the index of the key to retrieve.

The function retrieves the name of one subkey each time it is called.
It is typically called repeatedly until an EnvironmentError exception is
raised, indicating no more values are available."""
    hkey = hkey_w(w_hkey, space)
    null_dword = lltype.nullptr(rwin32.LPDWORD.TO)
    # The Windows docs claim that the max key name length is 255
    # characters, plus a terminating nul character.  However,
    # empirical testing demonstrates that it is possible to
    # create a 256 character key that is missing the terminating
    # nul.  RegEnumKeyEx requires a 257 character buffer to
    # retrieve such a key name.
    buf = ByteBuffer(257 * 2)
    bufP = rffi.cast(rwin32.LPWSTR, buf.get_raw_address())
    with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as valueSize:
        valueSize[0] = rffi.cast(rwin32.DWORD, 257)  # includes NULL terminator
        ret = rwinreg.RegEnumKeyExW(hkey, index, bufP, valueSize,
                                    null_dword, None, null_dword,
                                    lltype.nullptr(rwin32.PFILETIME.TO))
        if ret != 0:
            raiseWindowsError(space, ret, 'RegEnumKeyEx')
        # valueSize is in characters; the byte buffer is UTF-16, hence * 2
        vlen = intmask(valueSize[0]) * 2
        utf8, lgt = wbuf_to_utf8(space, buf[0:vlen])
        return space.newtext(utf8, lgt)


def QueryInfoKey(space, w_hkey):
    """tuple = QueryInfoKey(key) - Returns information about a key.

key is an already open key, or any one of the predefined HKEY_* constants.

The result is a tuple of 3 items:
An integer that identifies the number of sub keys this key has.
An integer that identifies the number of values this key has.
A long integer that identifies when the key was last modified (if available)
as 100's of nanoseconds since Jan 1, 1600."""
    hkey = hkey_w(w_hkey, space)
    with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as nSubKeys:
        with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as nValues:
            with lltype.scoped_alloc(rwin32.PFILETIME.TO, 1) as ft:
                null_dword = lltype.nullptr(rwin32.LPDWORD.TO)
                ret = rwinreg.RegQueryInfoKeyW(
                    hkey, None, null_dword, null_dword,
                    nSubKeys, null_dword, null_dword,
                    nValues, null_dword, null_dword,
                    null_dword, ft)
                if ret != 0:
                    raiseWindowsError(space, ret, 'RegQueryInfoKey')
                # Combine the two 32-bit FILETIME halves into one 64-bit
                # last-modified tick count.
                lgt = ((lltype.r_longlong(ft[0].c_dwHighDateTime) << 32) +
                       lltype.r_longlong(ft[0].c_dwLowDateTime))
                return space.newtuple([space.newint(nSubKeys[0]),
                                       space.newint(nValues[0]),
                                       space.newint(lgt)])


def ConnectRegistry(space, w_machine, w_hkey):
    """key = ConnectRegistry(computer_name, key)

Establishes a connection to a predefined registry handle on another computer.

computer_name is the name of the remote computer, of the form \\\\computername.
If None, the local computer is used.
key is the predefined handle to connect to.

The return value is the handle of the opened key.
If the function fails, an EnvironmentError exception is raised."""
    hkey = hkey_w(w_hkey, space)
    if space.is_none(w_machine):
        with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
            ret = rwinreg.RegConnectRegistryW(None, hkey, rethkey)
            if ret != 0:
                raiseWindowsError(space, ret, 'RegConnectRegistry')
            return W_HKEY(space, rethkey[0])
    else:
        utf8 = space.utf8_w(w_machine)
        state = space.fromcache(CodecState)
        errh = state.encode_error_handler
        # Encode to UTF-16 with an explicit NUL terminator appended.
        machineW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh,
                                      allow_surrogates=True)
        with rffi.scoped_nonmovingbuffer(machineW) as machineP0:
            # ptradd(..., 2) skips the first 2 bytes — presumably the BOM
            # emitted by utf8_encode_utf_16 (same pattern in OpenKey).
            machineP = rffi.cast(rwin32.LPWSTR, rffi.ptradd(machineP0, 2))
            with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
                ret = rwinreg.RegConnectRegistryW(machineP, hkey, rethkey)
                if ret != 0:
                    raiseWindowsError(space, ret, 'RegConnectRegistry')
                return W_HKEY(space, rethkey[0])


def ExpandEnvironmentStrings(space, w_source):
    "string = ExpandEnvironmentStrings(string) - Expand environment vars."
    try:
        source, source_ulen = space.utf8_len_w(w_source)
        res, res_ulen = rwinreg.ExpandEnvironmentStrings(source, source_ulen)
        return space.newutf8(res, res_ulen)
    except WindowsError as e:
        raise wrap_oserror(space, e)


class ReflectionFunction(object):
    # Lazily-resolved advapi32 entry point.  Some registry-reflection
    # functions are not available on all Windows versions, so they are
    # looked up with dlsym at run time instead of being linked directly.
    def __init__(self, name, stdcall_wrapper):
        self.name = name
        self.handle = lltype.nullptr(rffi.VOIDP.TO)
        self.wrapper = stdcall_wrapper

    def check(self):
        # Return True if the function exists on this platform, resolving
        # and caching its address on first use.
        if self.handle != lltype.nullptr(rffi.VOIDP.TO):
            return True
        from rpython.rlib.rdynload import GetModuleHandle, dlsym
        lib = GetModuleHandle("advapi32.dll")
        try:
            handle = dlsym(lib, self.name)
        except KeyError:
            return False
        self.handle = handle
        return True

    def call(self, *args):
        # check() must have succeeded before call() is used.
        assert self.handle != lltype.nullptr(rffi.VOIDP.TO)
        return self.wrapper(self.handle, *args)


_RegDisableReflectionKey = ReflectionFunction("RegDisableReflectionKey",
                                              pypy_RegChangeReflectionKey)
_RegEnableReflectionKey = ReflectionFunction("RegEnableReflectionKey",
                                             pypy_RegChangeReflectionKey)
_RegQueryReflectionKey = ReflectionFunction("RegQueryReflectionKey",
                                            pypy_RegQueryReflectionKey)
_RegDeleteKeyExW = ReflectionFunction("RegDeleteKeyExW", pypy_RegDeleteKeyExW)


def DisableReflectionKey(space, w_key):
    """Disables registry reflection for 32-bit processes running on a 64-bit
Operating System.  Will generally raise NotImplemented if executed on
a 32-bit Operating System.
If the key is not on the reflection list, the function succeeds but has no effect.
Disabling reflection for a key does not affect reflection of any subkeys."""
    if not _RegDisableReflectionKey.check():
        raise oefmt(space.w_NotImplementedError,
                    "not implemented on this platform")
    else:
        hkey = hkey_w(w_key, space)
        ret = _RegDisableReflectionKey.call(hkey)
        if ret != 0:
            raiseWindowsError(space, ret, 'RegDisableReflectionKey')


def EnableReflectionKey(space, w_key):
    """Restores registry reflection for the specified disabled key.
Will generally raise NotImplemented if executed on a 32-bit Operating System.
Restoring reflection for a key does not affect reflection of any subkeys."""
    if not _RegEnableReflectionKey.check():
        raise oefmt(space.w_NotImplementedError,
                    "not implemented on this platform")
    else:
        hkey = hkey_w(w_key, space)
        ret = _RegEnableReflectionKey.call(hkey)
        if ret != 0:
            raiseWindowsError(space, ret, 'RegEnableReflectionKey')


def QueryReflectionKey(space, w_key):
    """bool = QueryReflectionKey(hkey) - Determines the reflection state for
the specified key.
Will generally raise NotImplemented if executed on a 32-bit
Operating System."""
    if not _RegQueryReflectionKey.check():
        raise oefmt(space.w_NotImplementedError,
                    "not implemented on this platform")
    else:
        hkey = hkey_w(w_key, space)
        with lltype.scoped_alloc(rwin32.LPBOOL.TO, 1) as isDisabled:
            ret = _RegQueryReflectionKey.call(hkey, isDisabled)
            if ret != 0:
                raiseWindowsError(space, ret, 'RegQueryReflectionKey')
            return space.newbool(intmask(isDisabled[0]) != 0)


@unwrap_spec(sub_key="unicode", access=r_uint, reserved=int)
def DeleteKeyEx(space, w_key, sub_key, access=rwinreg.KEY_WOW64_64KEY,
                reserved=0):
    """DeleteKeyEx(key, sub_key, sam, res) - Deletes the specified key.

key is an already open key, or any one of the predefined HKEY_* constants.
sub_key is a string that must be a subkey of the key identified by the key
parameter.
res is a reserved integer, and must be zero.  Default is zero.
sam is an integer that specifies an access mask that describes the desired
security access for the key.  Default is KEY_WOW64_64KEY.
This value must not be None, and the key may not have subkeys.

This method can not delete keys with subkeys.

If the method succeeds, the entire key, including all of its values,
is removed.  If the method fails, a WindowsError exception is raised.
On unsupported Windows versions, NotImplementedError is raised."""
    if not _RegDeleteKeyExW.check():
        raise oefmt(space.w_NotImplementedError,
                    "not implemented on this platform")
    else:
        hkey = hkey_w(w_key, space)
        with rffi.scoped_unicode2wcharp(sub_key) as wide_subkey:
            c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey)
            ret = _RegDeleteKeyExW.call(hkey, c_subkey, access, reserved)
            if ret != 0:
                raiseWindowsError(space, ret, 'RegDeleteKeyEx')
# NOTE(review): removed stray extraction artifacts here (the values
# 41.527505 / 91 / 0.649415 — dataset-metadata columns fused into the
# file content); they are not part of this module's source.
from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rwinreg, rwin32, rstring from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib.buffer import ByteBuffer from rpython.rlib.rutf8 import check_utf8 from pypy.interpreter.baseobjspace import W_Root, BufferInterfaceNotFound from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.unicodehelper import ( str_decode_utf_16, utf8_encode_utf_16) from pypy.module._codecs.interp_codecs import CodecState from rpython.translator.tool.cbuild import ExternalCompilationInfo eci = ExternalCompilationInfo( includes=['windows.h'], post_include_bits=[ "RPY_EXTERN LONG\n" "pypy_RegChangeReflectionKey(FARPROC address, HKEY key);\n" "RPY_EXTERN LONG\n" "pypy_RegQueryReflectionKey(FARPROC address, HKEY key, LPBOOL isDisabled);\n" "RPY_EXTERN LONG\n" "pypy_RegDeleteKeyExW(FARPROC address, HKEY key, LPCWSTR subkey,\n" " REGSAM sam, DWORD reserved);\n" ], separate_module_sources=[''' LONG pypy_RegChangeReflectionKey(FARPROC address, HKEY key) { LONG (WINAPI *func)(HKEY); *(FARPROC*)&func = address; return func(key); } LONG pypy_RegQueryReflectionKey(FARPROC address, HKEY key, LPBOOL isDisabled) { LONG (WINAPI *func)(HKEY, LPBOOL); *(FARPROC*)&func = address; return func(key, isDisabled); } LONG pypy_RegDeleteKeyExW(FARPROC address, HKEY key, LPCWSTR subkey, REGSAM sam, DWORD reserved) { LONG (WINAPI *func)(HKEY, LPCWSTR, REGSAM, DWORD); *(FARPROC*)&func = address; return func(key, subkey, sam, reserved); } '''], ) pypy_RegChangeReflectionKey = rffi.llexternal( 'pypy_RegChangeReflectionKey', [rffi.VOIDP, rwinreg.HKEY], rffi.LONG, compilation_info=eci) pypy_RegQueryReflectionKey = rffi.llexternal( 'pypy_RegQueryReflectionKey', [rffi.VOIDP, rwinreg.HKEY, rwin32.LPBOOL], rffi.LONG, compilation_info=eci) pypy_RegDeleteKeyExW = 
rffi.llexternal( 'pypy_RegDeleteKeyExW', [rffi.VOIDP, rwinreg.HKEY, rffi.CWCHARP, rwinreg.REGSAM, rwin32.DWORD], rffi.LONG, compilation_info=eci) def raiseWindowsError(space, errcode, context): message = rwin32.FormatErrorW(errcode) w_errcode = space.newint(errcode) w_t = space.newtuple([w_errcode, space.newtext(*message), space.w_None, w_errcode]) raise OperationError(space.w_WindowsError, w_t) class W_HKEY(W_Root): def __init__(self, space, hkey): self.hkey = hkey self.space = space self.register_finalizer(space) def _finalize_(self): try: self.Close(self.space) except: pass def as_int(self): return rffi.cast(rffi.SIZE_T, self.hkey) def descr_bool(self, space): return space.newbool(self.as_int() != 0) def descr_handle_get(self, space): return space.newint(self.as_int()) def descr_repr(self, space): return space.newtext("<PyHKEY:0x%x>" % (self.as_int(),)) def descr_int(self, space): return space.newint(self.as_int()) def descr__enter__(self, space): return self def descr__exit__(self, space, __args__): CloseKey(space, self) def Close(self, space): CloseKey(space, self) def Detach(self, space): key = self.as_int() self.hkey = rwin32.NULL_HANDLE return space.newint(key) @unwrap_spec(key=int) def new_HKEY(space, w_subtype, key): hkey = rffi.cast(rwinreg.HKEY, key) return W_HKEY(space, hkey) descr_HKEY_new = interp2app(new_HKEY) W_HKEY.typedef = TypeDef( "winreg.HKEYType", __doc__="""\ PyHKEY Object - A Python object, representing a win32 registry key. This object wraps a Windows HKEY object, automatically closing it when the object is destroyed. To guarantee cleanup, you can call either the Close() method on the PyHKEY, or the CloseKey() method. All functions which accept a handle object also accept an integer - however, use of the handle object is encouraged. Functions: Close() - Closes the underlying handle. Detach() - Returns the integer Win32 handle, detaching it from the object Properties: handle - The integer Win32 handle. 
Operations: __bool__ - Handles with an open object return true, otherwise false. __int__ - Converting a handle to an integer returns the Win32 handle. __cmp__ - Handle objects are compared using the handle value.""", __new__=descr_HKEY_new, __repr__=interp2app(W_HKEY.descr_repr), __int__=interp2app(W_HKEY.descr_int), __bool__=interp2app(W_HKEY.descr_bool), __enter__=interp2app(W_HKEY.descr__enter__), __exit__=interp2app(W_HKEY.descr__exit__), handle=GetSetProperty(W_HKEY.descr_handle_get), Close=interp2app(W_HKEY.Close), Detach=interp2app(W_HKEY.Detach), ) def hkey_w(w_hkey, space): if space.is_w(w_hkey, space.w_None): raise oefmt(space.w_TypeError, "None is not a valid HKEY in this context") elif isinstance(w_hkey, W_HKEY): return w_hkey.hkey elif space.isinstance_w(w_hkey, space.w_int): if space.is_true(space.lt(w_hkey, space.newint(0))): return rffi.cast(rwinreg.HKEY, space.int_w(w_hkey)) return rffi.cast(rwinreg.HKEY, space.uint_w(w_hkey)) else: raise oefmt(space.w_TypeError, "The object is not a PyHKEY object") def CloseKey(space, w_hkey): hkey = hkey_w(w_hkey, space) if hkey: ret = rwinreg.RegCloseKey(hkey) if ret != 0: raiseWindowsError(space, ret, 'RegCloseKey') if isinstance(w_hkey, W_HKEY): space.interp_w(W_HKEY, w_hkey).hkey = rwin32.NULL_HANDLE def FlushKey(space, w_hkey): hkey = hkey_w(w_hkey, space) if hkey: ret = rwinreg.RegFlushKey(hkey) if ret != 0: raiseWindowsError(space, ret, 'RegFlushKey') @unwrap_spec(subkey="unicode", filename="unicode") def LoadKey(space, w_hkey, subkey, filename): hkey = hkey_w(w_hkey, space) with rffi.scoped_unicode2wcharp(subkey) as wide_subkey: c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey) with rffi.scoped_unicode2wcharp(filename) as wide_filename: c_filename = rffi.cast(rffi.CWCHARP, wide_filename) ret = rwinreg.RegLoadKeyW(hkey, c_subkey, c_filename) if ret != 0: raiseWindowsError(space, ret, 'RegLoadKey') @unwrap_spec(filename="unicode") def SaveKey(space, w_hkey, filename): hkey = hkey_w(w_hkey, space) with 
rffi.scoped_unicode2wcharp(filename) as wide_filename: c_filename = rffi.cast(rffi.CWCHARP, wide_filename) ret = rwinreg.RegSaveKeyW(hkey, c_filename, None) if ret != 0: raiseWindowsError(space, ret, 'RegSaveKey') @unwrap_spec(typ=int) def SetValue(space, w_hkey, w_subkey, typ, w_value): if typ != rwinreg.REG_SZ: raise oefmt(space.w_ValueError, "Type must be winreg.REG_SZ") hkey = hkey_w(w_hkey, space) state = space.fromcache(CodecState) errh = state.encode_error_handler utf8 = space.utf8_w(w_subkey) subkeyW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh, allow_surrogates=True) utf8 = space.utf8_w(w_value) valueW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh, allow_surrogates=True) valueL = space.len_w(w_value) with rffi.scoped_nonmovingbuffer(subkeyW) as subkeyP0: subkeyP = rffi.cast(rffi.CWCHARP, rffi.ptradd(subkeyP0, 2)) with rffi.scoped_nonmovingbuffer(valueW) as valueP0: valueP = rffi.cast(rffi.CWCHARP, rffi.ptradd(valueP0, 2)) ret = rwinreg.RegSetValueW(hkey, subkeyP, rwinreg.REG_SZ, valueP, valueL) if ret != 0: raiseWindowsError(space, ret, 'RegSetValue') def QueryValue(space, w_hkey, w_subkey): hkey = hkey_w(w_hkey, space) if space.is_w(w_subkey, space.w_None): subkey = None else: subkey = space.utf8_w(w_subkey).decode('utf8') with rffi.scoped_unicode2wcharp(subkey) as wide_subkey: c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey) with lltype.scoped_alloc(rwin32.PLONG.TO, 1) as bufsize_p: bufsize_p[0] = rffi.cast(rwin32.LONG, 0) ret = rwinreg.RegQueryValueW(hkey, c_subkey, None, bufsize_p) bufSize = intmask(bufsize_p[0]) if ret == rwinreg.ERROR_MORE_DATA: bufSize = 256 elif ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') while True: buf = ByteBuffer(bufSize) bufP = rffi.cast(rffi.CWCHARP, buf.get_raw_address()) ret = rwinreg.RegQueryValueW(hkey, c_subkey, bufP, bufsize_p) if ret == rwinreg.ERROR_MORE_DATA: bufSize *= 2 bufsize_p[0] = rffi.cast(rwin32.LONG, bufSize) continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') utf8, 
lgt = wbuf_to_utf8(space, buf[0:intmask(bufsize_p[0])]) return space.newtext(utf8, lgt) def convert_to_regdata(space, w_value, typ): buf = None if typ == rwinreg.REG_DWORD: if space.is_none(w_value) or space.isinstance_w(w_value, space.w_int): if space.is_none(w_value): value = r_uint(0) else: value = space.c_uint_w(w_value) buflen = rffi.sizeof(rwin32.DWORD) buf1 = lltype.malloc(rffi.CArray(rwin32.DWORD), 1, flavor='raw') buf1[0] = rffi.cast(rffi.UINT, value) buf = rffi.cast(rffi.CCHARP, buf1) elif typ == rwinreg.REG_QWORD: if space.is_none(w_value) or space.isinstance_w(w_value, space.w_int): if space.is_none(w_value): value = r_ulonglong(0) else: value = space.r_ulonglong_w(w_value) buflen = rffi.sizeof(rffi.ULONGLONG) buf1 = lltype.malloc(rffi.CArray(rffi.ULONGLONG), 1, flavor='raw') buf1[0] = rffi.cast(rffi.ULONGLONG, value) buf = rffi.cast(rffi.CCHARP, buf1) elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: if space.is_w(w_value, space.w_None): buflen = 1 buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') buf[0] = '\0' else: buf = rffi.unicode2wcharp(space.utf8_w(w_value).decode('utf8')) buf = rffi.cast(rffi.CCHARP, buf) buflen = (space.len_w(w_value) * 2) + 1 elif typ == rwinreg.REG_MULTI_SZ: if space.is_w(w_value, space.w_None): buflen = 1 buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') buf[0] = '\0' elif space.isinstance_w(w_value, space.w_list): strings = [] buflen = 0 w_iter = space.iter(w_value) while True: try: w_item = space.next(w_iter) item = space.utf8_w(w_item).decode('utf8') strings.append(item) buflen += 2 * (len(item) + 1) except OperationError as e: if not e.match(space, space.w_StopIteration): raise break buflen += 2 buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') buflen = 0 for string in strings: with rffi.scoped_unicode2wcharp(string) as wchr: c_str = rffi.cast(rffi.CCHARP, wchr) for i in range(len(string) * 2): buf[buflen + i] = c_str[i] buflen += (len(string) + 1) * 2 buf[buflen - 1] = '\0' buf[buflen 
- 2] = '\0' buflen += 2 buf[buflen - 1] = '\0' buf[buflen - 2] = '\0' else: if space.is_w(w_value, space.w_None): buflen = 0 buf = lltype.nullptr(rffi.CCHARP.TO) else: try: value = w_value.buffer_w(space, space.BUF_SIMPLE) except BufferInterfaceNotFound: raise oefmt(space.w_TypeError, "Objects of type '%T' can not be used as binary " "registry values", w_value) else: value = value.as_str() buflen = len(value) buf = rffi.str2charp(value) if buf is not None: return rffi.cast(rffi.CWCHARP, buf), buflen raise oefmt(space.w_ValueError, "Could not convert the data to the specified type") def wbuf_to_utf8(space, wbuf): state = space.fromcache(CodecState) errh = state.decode_error_handler utf8, lgt, pos = str_decode_utf_16(wbuf, 'surrogatepass', final=True, errorhandler=errh) if len(utf8) > 1 and utf8[len(utf8) - 1] == '\x00': newlen = len(utf8) - 1 assert newlen >=0 utf8 = utf8[0:newlen] lgt -= 1 return utf8, lgt def convert_from_regdata(space, buf, buflen, typ): if typ == rwinreg.REG_DWORD: if not buflen: return space.newint(0) d = rffi.cast(rwin32.LPDWORD, buf.get_raw_address())[0] return space.newint(d) elif typ == rwinreg.REG_QWORD: if not buflen: return space.newint(0) d = rffi.cast(rffi.ULONGLONGP, buf.get_raw_address())[0] return space.newint(d) elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: if not buflen: return space.newtext('', 0) even = (buflen // 2) * 2 utf8, lgt = wbuf_to_utf8(space, buf[0:even]) i = 0 utf8len = len(utf8) while i < utf8len: if utf8[i] == '\x00': utf8 = utf8[0:i] lgt = check_utf8(utf8, True) break i += 1 w_s = space.newtext(utf8, lgt) return w_s elif typ == rwinreg.REG_MULTI_SZ: if not buflen: return space.newlist([]) even = (buflen // 2) * 2 utf8, lgt = wbuf_to_utf8(space, buf[0:even]) parts = rstring.split(utf8, '\0') partslen = len(parts) if partslen > 0 and parts[partslen-1] == '': partslen -= 1 ret = [] i = 0 while i < partslen: lgt = check_utf8(parts[i], True) ret.append(space.newtext(parts[i], lgt)) i += 1 return 
space.newlist(ret) else: if buflen == 0: return space.w_None else: return space.newbytes(buf[0:buflen]) @unwrap_spec(value_name="unicode", typ=int) def SetValueEx(space, w_hkey, value_name, w_reserved, typ, w_value): hkey = hkey_w(w_hkey, space) buf, buflen = convert_to_regdata(space, w_value, typ) try: with rffi.scoped_unicode2wcharp(value_name) as wide_vn: c_vn = rffi.cast(rffi.CWCHARP, wide_vn) ret = rwinreg.RegSetValueExW(hkey, c_vn, 0, typ, buf, buflen) finally: if buf != lltype.nullptr(rffi.CWCHARP.TO): lltype.free(buf, flavor='raw') if ret != 0: raiseWindowsError(space, ret, 'RegSetValueEx') def QueryValueEx(space, w_hkey, w_subkey): hkey = hkey_w(w_hkey, space) if space.is_w(w_subkey, space.w_None): subkey = None else: subkey = space.utf8_w(w_subkey).decode('utf8') null_dword = lltype.nullptr(rwin32.LPDWORD.TO) with rffi.scoped_unicode2wcharp(subkey) as wide_subkey: c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey) with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as dataSize: ret = rwinreg.RegQueryValueExW(hkey, c_subkey, null_dword, null_dword, None, dataSize) bufSize = intmask(dataSize[0]) if ret == rwinreg.ERROR_MORE_DATA: bufSize = 256 elif ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') while True: dataBuf = ByteBuffer(bufSize) dataBufP = rffi.cast(rffi.CWCHARP, dataBuf.get_raw_address()) with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as retType: ret = rwinreg.RegQueryValueExW(hkey, c_subkey, null_dword, retType, dataBufP, dataSize) if ret == rwinreg.ERROR_MORE_DATA: bufSize *= 2 dataSize[0] = rffi.cast(rwin32.DWORD, bufSize) continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') length = intmask(dataSize[0]) ret_type = intmask(retType[0]) return space.newtuple([ convert_from_regdata(space, dataBuf, length, ret_type), space.newint(intmask(ret_type)), ]) @unwrap_spec(subkey="unicode") def CreateKey(space, w_hkey, subkey): hkey = hkey_w(w_hkey, space) with rffi.scoped_unicode2wcharp(subkey) as wide_subkey: c_subkey = 
rffi.cast(rffi.CWCHARP, wide_subkey) with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegCreateKeyW(hkey, c_subkey, rethkey) if ret != 0: raiseWindowsError(space, ret, 'CreateKey') return W_HKEY(space, rethkey[0]) @unwrap_spec(sub_key="unicode", reserved=int, access=r_uint) def CreateKeyEx(space, w_key, sub_key, reserved=0, access=rwinreg.KEY_WRITE): hkey = hkey_w(w_key, space) with rffi.scoped_unicode2wcharp(sub_key) as wide_sub_key: c_subkey = rffi.cast(rffi.CWCHARP, wide_sub_key) with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegCreateKeyExW(hkey, c_subkey, reserved, None, 0, access, None, rethkey, lltype.nullptr(rwin32.LPDWORD.TO)) if ret != 0: raiseWindowsError(space, ret, 'CreateKeyEx') return W_HKEY(space, rethkey[0]) @unwrap_spec(subkey="unicode") def DeleteKey(space, w_hkey, subkey): hkey = hkey_w(w_hkey, space) with rffi.scoped_unicode2wcharp(subkey) as wide_subkey: c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey) ret = rwinreg.RegDeleteKeyW(hkey, c_subkey) if ret != 0: raiseWindowsError(space, ret, 'RegDeleteKey') @unwrap_spec(subkey="unicode") def DeleteValue(space, w_hkey, subkey): hkey = hkey_w(w_hkey, space) with rffi.scoped_unicode2wcharp(subkey) as wide_subkey: c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey) ret = rwinreg.RegDeleteValueW(hkey, c_subkey) if ret != 0: raiseWindowsError(space, ret, 'RegDeleteValue') @unwrap_spec(reserved=int, access=r_uint) def OpenKey(space, w_key, w_sub_key, reserved=0, access=rwinreg.KEY_READ): hkey = hkey_w(w_key, space) utf8 = space.utf8_w(w_sub_key) state = space.fromcache(CodecState) errh = state.encode_error_handler subkeyW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh, allow_surrogates=True) with rffi.scoped_nonmovingbuffer(subkeyW) as subkeyP0: subkeyP = rffi.cast(rffi.CWCHARP, rffi.ptradd(subkeyP0, 2)) with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegOpenKeyExW(hkey, subkeyP, reserved, access, rethkey) if ret != 0: 
raiseWindowsError(space, ret, 'RegOpenKeyEx') return W_HKEY(space, rethkey[0]) @unwrap_spec(index=int) def EnumValue(space, w_hkey, index): hkey = hkey_w(w_hkey, space) null_dword = lltype.nullptr(rwin32.LPDWORD.TO) with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as valueSize: with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as dataSize: ret = rwinreg.RegQueryInfoKeyW( hkey, None, null_dword, null_dword, null_dword, null_dword, null_dword, null_dword, valueSize, dataSize, null_dword, lltype.nullptr(rwin32.PFILETIME.TO)) if ret != 0: raiseWindowsError(space, ret, 'RegQueryInfoKey') valueSize[0] = rffi.cast(rwin32.DWORD, intmask(valueSize[0]) + 1) dataSize[0] = rffi.cast(rwin32.DWORD, intmask(dataSize[0]) + 1) bufDataSize = intmask(dataSize[0]) bufValueSize = intmask(valueSize[0]) * 2 valueBuf = ByteBuffer(bufValueSize) valueBufP = rffi.cast(rffi.CWCHARP, valueBuf.get_raw_address()) while True: dataBuf = ByteBuffer(bufDataSize) dataBufP = rffi.cast(rffi.CCHARP, dataBuf.get_raw_address()) with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as retType: ret = rwinreg.RegEnumValueW( hkey, index, valueBufP, valueSize, null_dword, retType, dataBufP, dataSize) if ret == rwinreg.ERROR_MORE_DATA: bufDataSize *= 4 dataSize[0] = rffi.cast(rwin32.DWORD, bufDataSize) valueSize[0] = rffi.cast(rwin32.DWORD, bufValueSize) continue if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') length = intmask(dataSize[0]) vlen = (intmask(valueSize[0]) + 1) * 2 utf8v, lenv = wbuf_to_utf8(space, valueBuf[0:vlen]) ret_type = intmask(retType[0]) return space.newtuple([ space.newtext(utf8v, lenv), convert_from_regdata(space, dataBuf, length, ret_type), space.newint(ret_type), ]) @unwrap_spec(index=int) def EnumKey(space, w_hkey, index): hkey = hkey_w(w_hkey, space) null_dword = lltype.nullptr(rwin32.LPDWORD.TO) buf = ByteBuffer(257 * 2) bufP = rffi.cast(rwin32.LPWSTR, buf.get_raw_address()) with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as valueSize: valueSize[0] = rffi.cast(rwin32.DWORD, 257) ret = 
rwinreg.RegEnumKeyExW(hkey, index, bufP, valueSize, null_dword, None, null_dword, lltype.nullptr(rwin32.PFILETIME.TO)) if ret != 0: raiseWindowsError(space, ret, 'RegEnumKeyEx') vlen = intmask(valueSize[0]) * 2 utf8, lgt = wbuf_to_utf8(space, buf[0:vlen]) return space.newtext(utf8, lgt) def QueryInfoKey(space, w_hkey): hkey = hkey_w(w_hkey, space) with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as nSubKeys: with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as nValues: with lltype.scoped_alloc(rwin32.PFILETIME.TO, 1) as ft: null_dword = lltype.nullptr(rwin32.LPDWORD.TO) ret = rwinreg.RegQueryInfoKeyW( hkey, None, null_dword, null_dword, nSubKeys, null_dword, null_dword, nValues, null_dword, null_dword, null_dword, ft) if ret != 0: raiseWindowsError(space, ret, 'RegQueryInfoKey') lgt = ((lltype.r_longlong(ft[0].c_dwHighDateTime) << 32) + lltype.r_longlong(ft[0].c_dwLowDateTime)) return space.newtuple([space.newint(nSubKeys[0]), space.newint(nValues[0]), space.newint(lgt)]) def ConnectRegistry(space, w_machine, w_hkey): hkey = hkey_w(w_hkey, space) if space.is_none(w_machine): with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegConnectRegistryW(None, hkey, rethkey) if ret != 0: raiseWindowsError(space, ret, 'RegConnectRegistry') return W_HKEY(space, rethkey[0]) else: utf8 = space.utf8_w(w_machine) state = space.fromcache(CodecState) errh = state.encode_error_handler machineW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh, allow_surrogates=True) with rffi.scoped_nonmovingbuffer(machineW) as machineP0: machineP = rffi.cast(rwin32.LPWSTR, rffi.ptradd(machineP0, 2)) with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegConnectRegistryW(machineP, hkey, rethkey) if ret != 0: raiseWindowsError(space, ret, 'RegConnectRegistry') return W_HKEY(space, rethkey[0]) def ExpandEnvironmentStrings(space, w_source): try: source, source_ulen = space.utf8_len_w(w_source) res, res_ulen = rwinreg.ExpandEnvironmentStrings(source, source_ulen) 
return space.newutf8(res, res_ulen) except WindowsError as e: raise wrap_oserror(space, e) class ReflectionFunction(object): def __init__(self, name, stdcall_wrapper): self.name = name self.handle = lltype.nullptr(rffi.VOIDP.TO) self.wrapper = stdcall_wrapper def check(self): if self.handle != lltype.nullptr(rffi.VOIDP.TO): return True from rpython.rlib.rdynload import GetModuleHandle, dlsym lib = GetModuleHandle("advapi32.dll") try: handle = dlsym(lib, self.name) except KeyError: return False self.handle = handle return True def call(self, *args): assert self.handle != lltype.nullptr(rffi.VOIDP.TO) return self.wrapper(self.handle, *args) _RegDisableReflectionKey = ReflectionFunction( "RegDisableReflectionKey", pypy_RegChangeReflectionKey) _RegEnableReflectionKey = ReflectionFunction( "RegEnableReflectionKey", pypy_RegChangeReflectionKey) _RegQueryReflectionKey = ReflectionFunction( "RegQueryReflectionKey", pypy_RegQueryReflectionKey) _RegDeleteKeyExW = ReflectionFunction("RegDeleteKeyExW", pypy_RegDeleteKeyExW) def DisableReflectionKey(space, w_key): if not _RegDisableReflectionKey.check(): raise oefmt(space.w_NotImplementedError, "not implemented on this platform") else: hkey = hkey_w(w_key, space) ret = _RegDisableReflectionKey.call(hkey) if ret != 0: raiseWindowsError(space, ret, 'RegDisableReflectionKey') def EnableReflectionKey(space, w_key): if not _RegEnableReflectionKey.check(): raise oefmt(space.w_NotImplementedError, "not implemented on this platform") else: hkey = hkey_w(w_key, space) ret = _RegEnableReflectionKey.call(hkey) if ret != 0: raiseWindowsError(space, ret, 'RegEnableReflectionKey') def QueryReflectionKey(space, w_key): if not _RegQueryReflectionKey.check(): raise oefmt(space.w_NotImplementedError, "not implemented on this platform") else: hkey = hkey_w(w_key, space) with lltype.scoped_alloc(rwin32.LPBOOL.TO, 1) as isDisabled: ret = _RegQueryReflectionKey.call(hkey, isDisabled) if ret != 0: raiseWindowsError(space, ret, 
'RegQueryReflectionKey') return space.newbool(intmask(isDisabled[0]) != 0) @unwrap_spec(sub_key="unicode", access=r_uint, reserved=int) def DeleteKeyEx(space, w_key, sub_key, access=rwinreg.KEY_WOW64_64KEY, reserved=0): if not _RegDeleteKeyExW.check(): raise oefmt(space.w_NotImplementedError, "not implemented on this platform") else: hkey = hkey_w(w_key, space) with rffi.scoped_unicode2wcharp(sub_key) as wide_subkey: c_subkey = rffi.cast(rffi.CWCHARP, wide_subkey) ret = _RegDeleteKeyExW.call(hkey, c_subkey, access, reserved) if ret != 0: raiseWindowsError(space, ret, 'RegDeleteKeyEx')
true
true
1c45d8033a8532a5310eb7b5b6868e1725ae9e8a
1,188
py
Python
profiles_api/serializers.py
vikrantgautam/profiles-rest-api
68abd9398f04de6eb87357b997dd438b6503f8ea
[ "MIT" ]
null
null
null
profiles_api/serializers.py
vikrantgautam/profiles-rest-api
68abd9398f04de6eb87357b997dd438b6503f8ea
[ "MIT" ]
null
null
null
profiles_api/serializers.py
vikrantgautam/profiles-rest-api
68abd9398f04de6eb87357b997dd438b6503f8ea
[ "MIT" ]
null
null
null
from rest_framework import serializers from profiles_api import models class HelloSerializer(serializers.Serializer): """Serializes a name field for testing our APIView""" name = serializers.CharField(max_length=10) class UserProfileSerializer(serializers.ModelSerializer): """Serializes a user profile object""" class Meta: model = models.UserProfile fields = ('id', 'email', 'name', 'password') extra_kwargs = { 'password': { 'write_only': True, 'style': {'input_type': 'password'} } } def create(self, validated_data): """Create and return a new user""" user = models.UserProfile.objects.create_user( email=validated_data['email'], name=validated_data['name'], password=validated_data['password'], ) return user class ProfileFeedItemSerializer(serializers.ModelSerializer): """Serializes profile feed items""" class Meta: model = models.ProfileFeedItem fields = ('id', 'user_profile', 'status_text', 'created_on') extra_kwards = {'user_profile': {'read_only': True}}
28.285714
68
0.625421
from rest_framework import serializers from profiles_api import models class HelloSerializer(serializers.Serializer): name = serializers.CharField(max_length=10) class UserProfileSerializer(serializers.ModelSerializer): class Meta: model = models.UserProfile fields = ('id', 'email', 'name', 'password') extra_kwargs = { 'password': { 'write_only': True, 'style': {'input_type': 'password'} } } def create(self, validated_data): user = models.UserProfile.objects.create_user( email=validated_data['email'], name=validated_data['name'], password=validated_data['password'], ) return user class ProfileFeedItemSerializer(serializers.ModelSerializer): class Meta: model = models.ProfileFeedItem fields = ('id', 'user_profile', 'status_text', 'created_on') extra_kwards = {'user_profile': {'read_only': True}}
true
true
1c45d82e9f5994d25e1d89ebc33ba778c614bf38
1,850
py
Python
weasyl/cron.py
theSeracen/weasyl
c13b4f61f559ce44bfaee027ffc59a1379d25f3e
[ "Apache-2.0" ]
null
null
null
weasyl/cron.py
theSeracen/weasyl
c13b4f61f559ce44bfaee027ffc59a1379d25f3e
[ "Apache-2.0" ]
148
2021-03-16T07:40:05.000Z
2022-03-21T08:14:46.000Z
weasyl/cron.py
theSeracen/weasyl
c13b4f61f559ce44bfaee027ffc59a1379d25f3e
[ "Apache-2.0" ]
null
null
null
import arrow from twisted.python import log from weasyl.define import engine from weasyl import index, submission def run_periodic_tasks(): # An arrow object representing the current UTC time now = arrow.utcnow() db = engine.connect() with db.begin(): locked = db.scalar("SELECT pg_try_advisory_xact_lock(0)") if not locked: return last_run = arrow.get(db.scalar("SELECT last_run FROM cron_runs")) if not last_run or now < last_run.replace(second=59): return # Recache the latest submissions # Every 2 minutes if now.minute % 2 == 0: index.recent_submissions.refresh() log.msg('refreshed recent submissions') # Recalculate recently popular submissions # Every 10 minutes if now.minute % 10 == 0: submission.select_recently_popular.refresh() log.msg('refreshed recently popular submissions') # Delete all records from views table # Every 15 minutes if now.minute % 15 == 0: db.execute("DELETE FROM views") log.msg('cleared views') # Daily at 0:00 if now.hour == 0 and now.minute == 0: # Delete email reset requests older than two days db.execute(""" DELETE FROM emailverify WHERE createtimestamp < (NOW() - INTERVAL '2 days') """) log.msg('cleared stale email change records') # Purge stale logincreate records older than two days db.execute(""" DELETE FROM logincreate WHERE created_at < (NOW() - INTERVAL '2 days') """) log.msg('cleared stale account creation records') db.execute("UPDATE cron_runs SET last_run = %(now)s", now=now.naive)
33.035714
76
0.58973
import arrow from twisted.python import log from weasyl.define import engine from weasyl import index, submission def run_periodic_tasks(): now = arrow.utcnow() db = engine.connect() with db.begin(): locked = db.scalar("SELECT pg_try_advisory_xact_lock(0)") if not locked: return last_run = arrow.get(db.scalar("SELECT last_run FROM cron_runs")) if not last_run or now < last_run.replace(second=59): return if now.minute % 2 == 0: index.recent_submissions.refresh() log.msg('refreshed recent submissions') if now.minute % 10 == 0: submission.select_recently_popular.refresh() log.msg('refreshed recently popular submissions') if now.minute % 15 == 0: db.execute("DELETE FROM views") log.msg('cleared views') if now.hour == 0 and now.minute == 0: db.execute(""" DELETE FROM emailverify WHERE createtimestamp < (NOW() - INTERVAL '2 days') """) log.msg('cleared stale email change records') db.execute(""" DELETE FROM logincreate WHERE created_at < (NOW() - INTERVAL '2 days') """) log.msg('cleared stale account creation records') db.execute("UPDATE cron_runs SET last_run = %(now)s", now=now.naive)
true
true
1c45d908aa737c8cc2b138b76e17ef1a9a3d56e4
871
py
Python
evennia/commands/default/cmdset_unloggedin.py
fermuch/evennia
8961baa0a5b9b5419f864a144f080acc68a7ad0f
[ "BSD-3-Clause" ]
3
2019-08-08T16:58:25.000Z
2019-10-12T07:31:36.000Z
evennia/commands/default/cmdset_unloggedin.py
fermuch/evennia
8961baa0a5b9b5419f864a144f080acc68a7ad0f
[ "BSD-3-Clause" ]
9
2019-09-06T18:21:59.000Z
2022-01-13T03:04:11.000Z
evennia/commands/default/cmdset_unloggedin.py
fermuch/evennia
8961baa0a5b9b5419f864a144f080acc68a7ad0f
[ "BSD-3-Clause" ]
2
2019-09-02T08:39:24.000Z
2019-09-02T18:39:32.000Z
""" This module describes the unlogged state of the default game. The setting STATE_UNLOGGED should be set to the python path of the state instance in this module. """ from evennia.commands.cmdset import CmdSet from evennia.commands.default import unloggedin class UnloggedinCmdSet(CmdSet): """ Sets up the unlogged cmdset. """ key = "DefaultUnloggedin" priority = 0 def at_cmdset_creation(self): "Populate the cmdset" self.add(unloggedin.CmdUnconnectedConnect()) self.add(unloggedin.CmdUnconnectedCreate()) self.add(unloggedin.CmdUnconnectedQuit()) self.add(unloggedin.CmdUnconnectedLook()) self.add(unloggedin.CmdUnconnectedHelp()) self.add(unloggedin.CmdUnconnectedEncoding()) self.add(unloggedin.CmdUnconnectedScreenreader()) self.add(unloggedin.CmdUnconnectedInfo())
32.259259
61
0.72101
from evennia.commands.cmdset import CmdSet from evennia.commands.default import unloggedin class UnloggedinCmdSet(CmdSet): key = "DefaultUnloggedin" priority = 0 def at_cmdset_creation(self): self.add(unloggedin.CmdUnconnectedConnect()) self.add(unloggedin.CmdUnconnectedCreate()) self.add(unloggedin.CmdUnconnectedQuit()) self.add(unloggedin.CmdUnconnectedLook()) self.add(unloggedin.CmdUnconnectedHelp()) self.add(unloggedin.CmdUnconnectedEncoding()) self.add(unloggedin.CmdUnconnectedScreenreader()) self.add(unloggedin.CmdUnconnectedInfo())
true
true
1c45db6d3cecdc6f61c22f43e5bb581f20cf7a6b
2,437
py
Python
naeval/ner/models/tomita.py
sdspieg/naeval
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
[ "MIT" ]
36
2020-03-22T09:37:10.000Z
2022-01-17T14:49:30.000Z
naeval/ner/models/tomita.py
sdspieg/naeval
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
[ "MIT" ]
11
2020-03-25T09:39:45.000Z
2020-08-16T05:37:02.000Z
naeval/ner/models/tomita.py
sdspieg/naeval
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
[ "MIT" ]
6
2020-05-16T05:52:04.000Z
2022-01-16T06:45:29.000Z
from naeval.const import TOMITA, PER from naeval.record import Record from naeval.io import parse_xml from naeval.span import Span from ..adapt import adapt_tomita from ..markup import Markup from .base import Model, post TOMITA_IMAGE = 'natasha/tomita-algfio' TOMITA_CONTAINER_PORT = 8080 TOMITA_URL = 'http://{host}:{port}/' class TomitaFact(Record): __attributes__ = [ 'start', 'stop', 'first', 'last', 'middle', 'known_surname' ] def __init__(self, start, stop, first, last, middle, known_surname): self.start = start self.stop = stop self.first = first self.last = last self.middle = middle self.known_surname = known_surname class TomitaMarkup(Markup): @property def adapted(self): return adapt_tomita(self) def parse_facts(xml): if xml is None: return for item in xml.findall('Person'): start = int(item.get('pos')) size = int(item.get('len')) stop = start + size last = item.find('Name_Surname') if last is not None: last = last.get('val') or None first = item.find('Name_FirstName') if first is not None: first = first.get('val') middle = item.find('Name_Patronymic') if middle is not None: middle = middle.get('val') known_surname = item.find('Name_SurnameIsDictionary') if known_surname is not None: known_surname = int(known_surname.get('val')) known_surname = bool(known_surname) yield TomitaFact( start, stop, first, last, middle, known_surname ) def fact_spans(facts): for fact in facts: yield Span(fact.start, fact.stop, PER) def parse_tomita(text, xml): assert xml.tag == 'document', xml.tag facts = xml.find('facts') facts = parse_facts(facts) spans = list(fact_spans(facts)) return TomitaMarkup(text, spans) def call_tomita(text, host, port): url = TOMITA_URL.format( host=host, port=port ) payload = text.encode('utf8') response = post(url, data=payload) xml = parse_xml(response.text) return parse_tomita(text, xml) class TomitaModel(Model): name = TOMITA image = TOMITA_IMAGE container_port = TOMITA_CONTAINER_PORT def __call__(self, text): return call_tomita(text, self.host, 
self.port)
24.867347
61
0.622487
from naeval.const import TOMITA, PER from naeval.record import Record from naeval.io import parse_xml from naeval.span import Span from ..adapt import adapt_tomita from ..markup import Markup from .base import Model, post TOMITA_IMAGE = 'natasha/tomita-algfio' TOMITA_CONTAINER_PORT = 8080 TOMITA_URL = 'http://{host}:{port}/' class TomitaFact(Record): __attributes__ = [ 'start', 'stop', 'first', 'last', 'middle', 'known_surname' ] def __init__(self, start, stop, first, last, middle, known_surname): self.start = start self.stop = stop self.first = first self.last = last self.middle = middle self.known_surname = known_surname class TomitaMarkup(Markup): @property def adapted(self): return adapt_tomita(self) def parse_facts(xml): if xml is None: return for item in xml.findall('Person'): start = int(item.get('pos')) size = int(item.get('len')) stop = start + size last = item.find('Name_Surname') if last is not None: last = last.get('val') or None first = item.find('Name_FirstName') if first is not None: first = first.get('val') middle = item.find('Name_Patronymic') if middle is not None: middle = middle.get('val') known_surname = item.find('Name_SurnameIsDictionary') if known_surname is not None: known_surname = int(known_surname.get('val')) known_surname = bool(known_surname) yield TomitaFact( start, stop, first, last, middle, known_surname ) def fact_spans(facts): for fact in facts: yield Span(fact.start, fact.stop, PER) def parse_tomita(text, xml): assert xml.tag == 'document', xml.tag facts = xml.find('facts') facts = parse_facts(facts) spans = list(fact_spans(facts)) return TomitaMarkup(text, spans) def call_tomita(text, host, port): url = TOMITA_URL.format( host=host, port=port ) payload = text.encode('utf8') response = post(url, data=payload) xml = parse_xml(response.text) return parse_tomita(text, xml) class TomitaModel(Model): name = TOMITA image = TOMITA_IMAGE container_port = TOMITA_CONTAINER_PORT def __call__(self, text): return call_tomita(text, self.host, 
self.port)
true
true
1c45dbebdbe4e22104a31a6023c49fc2d26290c2
10,611
py
Python
src/cargrid.py
Potgront/ABM
76fef2c7ded7e362ecf72fffd82512b9d7926700
[ "BSD-3-Clause" ]
null
null
null
src/cargrid.py
Potgront/ABM
76fef2c7ded7e362ecf72fffd82512b9d7926700
[ "BSD-3-Clause" ]
null
null
null
src/cargrid.py
Potgront/ABM
76fef2c7ded7e362ecf72fffd82512b9d7926700
[ "BSD-3-Clause" ]
null
null
null
""" Module which defines the car agents """ from mesa import Agent import numpy as np class Car(Agent): """ Class which defines the inidividual car agents. Each car has a specific unique_id and an index which is the unique_id modulo the resolution of the LaneSpace. Attributes: unique_id (int): An integer value which is uniquely defines each agent model (obj): An instance of the model class index (int): The unique_id modulo the resolution of the LaneSpace defined in the model instance. pos (tuple): The position of the car agent. The first argument of the tuple defines the position along the horizontal continuous axis, the second value specifies the current lane of the car. max_speed (float): A constant value which defines the upper limit of the cars' speed. speed (float): A variable value which specifies the current speed of the car, cannot exceed the value defined in max_speed. agression (float): Defines the agression of the car, between [0,1]. Results in the car swiching lanes more often as it increases. Also makes acceleration more likely if possible. gap (float): Defines the space cars keep between each other, relative to their speeds, e.g. a value of 2 would result in a gap equal to twice the cars' speed, so 20 meters at a speed of 10 m/s. Higher agression can occasionally override this specified gap. switch_delay (int): Defines the number of time steps a car waits between switching lanes. High agression lowers this value. switched (int): Counter for the number of time steps since last switching a lane. Decreases by one each timestep and allows the car to switch lanes if at zero. Is set to the value of switch_delay if the car switches a lane. """ # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-arguments def __init__(self, unique_id, model, start_lane, speed, agression, min_gap): """ Args: unique_id (int): The unique id of the current agent, generated by agent scheduler. 
model (obj): Instance of the LaneSpace model start_lane (int): The lane in which the car should start. speed (float): Initial speed of the car, also used to set the maximum speed of the car. agression (float): Agression of the car, bounded between [0,1]. min_gap (float): The absolute minimum space the car should maintain, relative to it's own speed. """ super().__init__(unique_id, model) self.start_lane = start_lane self.index = self.unique_id % model.grid.length self.pos = (0.0, start_lane) self.max_speed = speed+(abs(np.random.randn())*agression) self.speed = self.max_speed self.agression = agression self.gap = np.random.rand() / agression + min_gap self.switch_delay = int(5 / agression / self.model.time_step) self.switched = self.switch_delay def compute_pars(self, FRONT, BACK): """compute_pars Method which determines weather a car can switch to another lane or maintain it's current speed. Args: FRONT (list): A list which should contain the positions of the cars in front of the current car, on the right, middle and left. In that specific order. BACK (list): A list which should contain the postitions of the cars behind the current car, on the right, middle and left. In that specific order. Returns: can_left (bool): Can the car switch to the left? can_middle (bool): Can the car go forward? can_right (bool): Can the car switch to the right? """ rf, mf, lf = FRONT # right_front, middle_front, left_front rb, mb, lb = BACK # right_back, middle_back, left_back """ Can the car turn left? The available space to the left front car should be larger than the cars' current speed multiplied by the minimum gap the car has to maintain. The gap to the car behind should be 0.5 times this distance as the car is also moving forward. 
Also checks if a lane exists on the left and if the car has recovered from the previous lane switch """ can_left = lf-self.pos[0] > self.gap * self.speed and\ self.pos[0]-lb > 0.5*self.gap * self.speed and\ self.pos[1] < (self.model.lanes - 1) and\ self.switched == 0 """ Can the car turn right? The available space to the right front should be larger than the cars' current speed multiplied by the minimum gap the car has to maintain. The gap to the car behind should be 0.5 times this distance as the car is also moving forward. Also checks if the car is not already in the rightmost lane and has recovered from the previous lane switch """ can_right = rf-self.pos[0] > self.gap * self.speed and\ self.pos[0]-rb > 0.5*self.gap * self.speed and\ self.pos[1] > 0 and\ self.switched == 0 """ Can the car go forward (to the middle)? This gap should be larger than minimum gap multiplied by the cars' current speed. """ can_middle = mf - self.pos[0] > self.gap*self.speed return can_left, can_middle, can_right def get_move(self): """ Method which determines the best possible move of the car, depedent on the agression, current speed, minimal gap, and postions of the six surrouding cars. This move is determined in a loop which first determines which moves are initially possible. It then uses the agression of the car to decide if the car should keep right, overtake, maintain in the current lane, or slow down. Returns: Integer of the best possible move: -1 if right, 0 if forward, 1 if left. """ self.switched -= 1 self.switched = max(0, self.switched) FRONT, BACK = self.model.grid.get_neighbors(self) rf, mf, lf = FRONT # right_front, middle_front, left_front rb, mb, lb = BACK # right_back, middle_back, left_back while True: cl, cm, cr = self.compute_pars(FRONT, BACK) # can_left, can_middle, can_right if cm: # Can i go forward at current speed? 
if cr and np.random.rand() > self.agression: """ Keep right if possible, probability decreases with increasing agression """ self.switched = self.switch_delay return -1 if (self.speed < self.max_speed) and\ (np.random.rand() < self.agression): """ Speed up if slowed down, probability increases with increasing agression. Also overtake to the left if possible """ self.check_speed(FRONT[1]-self.pos[0]) if cl: return 1 return 0 if cl and cr: # Can i go left and right? if (self.speed < self.max_speed) and\ (np.random.rand() < self.agression): """ Overtake on the left if slowed and agression allows. Speed up relative to the agression of the car. """ self.speed += np.random.rand()/self.agression*self.model.time_step self.switched = self.switch_delay return 1 if np.random.rand() > self.agression: """ Hold right if agression is low. Also slow down a bit so not to overtake on the right, scaling with agression. """ self.speed -= np.random.rand()/self.agression*self.model.time_step self.switched = self.switch_delay return -1 if rf > lf: """ Otherwise go to the lane with the most space in front. """ return -1 return 1 if cl and (np.random.rand() < self.agression): # Can i go left? """ Move to the left if agression allows. """ self.switched = self.switch_delay return 1 if cr: # Can i go right? """ Move to the right and slow down so not to overtake on the right. Deceleration decreases with agresion, so high agression could result in an overtake on the right. """ self.switched = self.switch_delay self.speed -= np.random.rand()/self.agression*self.model.time_step return -1 """ Slow down if none of the moves are possible and try all posibilites again. Recalculating the boolean values each loop. """ self.speed -= np.random.rand()*self.model.time_step def check_speed(self, gap): """ Check how much the car is slowed down and the gap to the car in front. Accelerate faster if agression is high, gap is large, or the car is slowed down a lot. 
Args: gap (float): Gap to the car in front """ diff = self.max_speed - self.speed space = (gap-self.speed)/self.speed/self.gap/self.agression speedup = max(np.random.rand(), np.log(diff*space))*self.model.time_step self.speed += speedup def step(self): """ Method used by the mesa scheduler to advance each car. Obtains a move from the get_move method and applies this to the overall LaneSpace model. Also performs a global check if the car is not exceeding its maximum speed. """ move = self.get_move() self.model.move(self, move) if self.speed > self.max_speed: self.speed -= np.random.rand()*self.model.time_step
43.310204
90
0.577985
from mesa import Agent import numpy as np class Car(Agent): def __init__(self, unique_id, model, start_lane, speed, agression, min_gap): super().__init__(unique_id, model) self.start_lane = start_lane self.index = self.unique_id % model.grid.length self.pos = (0.0, start_lane) self.max_speed = speed+(abs(np.random.randn())*agression) self.speed = self.max_speed self.agression = agression self.gap = np.random.rand() / agression + min_gap self.switch_delay = int(5 / agression / self.model.time_step) self.switched = self.switch_delay def compute_pars(self, FRONT, BACK): rf, mf, lf = FRONT rb, mb, lb = BACK can_left = lf-self.pos[0] > self.gap * self.speed and\ self.pos[0]-lb > 0.5*self.gap * self.speed and\ self.pos[1] < (self.model.lanes - 1) and\ self.switched == 0 can_right = rf-self.pos[0] > self.gap * self.speed and\ self.pos[0]-rb > 0.5*self.gap * self.speed and\ self.pos[1] > 0 and\ self.switched == 0 can_middle = mf - self.pos[0] > self.gap*self.speed return can_left, can_middle, can_right def get_move(self): self.switched -= 1 self.switched = max(0, self.switched) FRONT, BACK = self.model.grid.get_neighbors(self) rf, mf, lf = FRONT rb, mb, lb = BACK while True: cl, cm, cr = self.compute_pars(FRONT, BACK) if cm: if cr and np.random.rand() > self.agression: self.switched = self.switch_delay return -1 if (self.speed < self.max_speed) and\ (np.random.rand() < self.agression): self.check_speed(FRONT[1]-self.pos[0]) if cl: return 1 return 0 if cl and cr: if (self.speed < self.max_speed) and\ (np.random.rand() < self.agression): self.speed += np.random.rand()/self.agression*self.model.time_step self.switched = self.switch_delay return 1 if np.random.rand() > self.agression: self.speed -= np.random.rand()/self.agression*self.model.time_step self.switched = self.switch_delay return -1 if rf > lf: return -1 return 1 if cl and (np.random.rand() < self.agression): self.switched = self.switch_delay return 1 if cr: self.switched = self.switch_delay self.speed -= 
np.random.rand()/self.agression*self.model.time_step return -1 self.speed -= np.random.rand()*self.model.time_step def check_speed(self, gap): diff = self.max_speed - self.speed space = (gap-self.speed)/self.speed/self.gap/self.agression speedup = max(np.random.rand(), np.log(diff*space))*self.model.time_step self.speed += speedup def step(self): move = self.get_move() self.model.move(self, move) if self.speed > self.max_speed: self.speed -= np.random.rand()*self.model.time_step
true
true
1c45dc41168fc46b895c51c21cd20daa9a2082ba
5,247
py
Python
splink/intuition.py
rubensmau/splink
da4f5d5bc09753b6c6974af308dd1bad324d9b4b
[ "MIT" ]
176
2020-03-16T15:19:39.000Z
2022-03-30T06:38:29.000Z
splink/intuition.py
rubensmau/splink
da4f5d5bc09753b6c6974af308dd1bad324d9b4b
[ "MIT" ]
194
2020-03-01T21:32:26.000Z
2022-03-30T14:58:38.000Z
splink/intuition.py
rubensmau/splink
da4f5d5bc09753b6c6974af308dd1bad324d9b4b
[ "MIT" ]
25
2020-03-07T00:09:22.000Z
2022-03-11T16:28:06.000Z
from .model import Model from .charts import load_chart_definition, altair_if_installed_else_json import pandas as pd from math import log2 initial_template = """ Initial probability of match (prior) = λ = {lam:.4g} """ col_template = [ ("Comparison of {column_name}. Values are:", ""), ("{column_name}_l:", "{value_l}"), ("{column_name}_r:", "{value_r}"), ("Comparison has:", "{num_levels} levels"), ("Level for this comparison:", "{gamma_column_name} = {gamma_index}"), ("m probability = P(level|match):", "{m_probability:.4g}"), ("u probability = P(level|non-match):", "{u_probability:.4g}"), ("Bayes factor = m/u:", "{bayes_factor:.4g}"), ("New probability of match (updated belief):", "{updated_belief:.4g}"), ] end_template = """ Final probability of match = {final:.4g} Reminder: The m probability for a given level is the proportion of matches which are in this level. We would generally expect the highest similarity level to have the largest proportion of matches. For example, we would expect first name field to match exactly amongst most matching records, except where nicknames, aliases or typos have occurred. For a comparison column that changes through time, like address, we may expect a lower proportion of comparisons to be in the highest similarity level. The u probability for a given level is the proportion of non-matches which are in this level. We would generally expect the lowest similarity level to have the highest proportion of non-matches, but the magnitude depends on the cardinality of the field. For example, we would expect that in the vast majority of non-matching records, the date of birth field would not match. However, we would expect it to be common for gender to match amongst non-matches. 
""" def intuition_report(row_dict: dict, model: Model): """Generate a text summary of a row in the comparison table which explains how the match_probability was computed Args: row_dict (dict): A python dictionary representing the comparison row model (Model): splink Model object Returns: string: The intuition report """ lam = model.current_settings_obj["proportion_of_matches"] report = initial_template.format(lam=lam) current_prob = lam for cc in model.current_settings_obj.comparison_columns_list: d = cc.describe_row_dict(row_dict) bf = d["bayes_factor"] a = bf * current_prob new_p = a / (a + (1 - current_prob)) d["updated_belief"] = new_p current_prob = new_p col_report = [] col_report.append("------") for (blurb, value) in col_template: blurb_fmt = blurb.format(**d) value_fmt = value.format(**d) col_report.append(f"{blurb_fmt:<50} {value_fmt}") col_report.append("\n") col_report = "\n".join(col_report) report += col_report report += end_template.format(final=new_p) if len(model.current_settings_obj["blocking_rules"]) > 1: match_key = int(row_dict["match_key"]) br = model.current_settings_obj["blocking_rules"][match_key] br = f"\nThis comparison was generated by the blocking rule: {br}" report += br return report def _get_bayes_factors(row_dict, model): bayes_factors = [] lam = model.current_settings_obj["proportion_of_matches"] for cc in model.current_settings_obj.comparison_columns_list: row_desc = cc.describe_row_dict(row_dict, lam) bayes_factors.append(row_desc) return bayes_factors def bayes_factor_chart(row_dict, model): chart_path = "bayes_factor_chart_def.json" bayes_factor_chart_def = load_chart_definition(chart_path) bayes_factor_chart_def["data"]["values"] = _get_bayes_factors(row_dict, model) bayes_factor_chart_def["encoding"]["y"]["field"] = "column_name" del bayes_factor_chart_def["encoding"]["row"] return altair_if_installed_else_json(bayes_factor_chart_def) def bayes_factor_intuition_chart(row_dict, model): chart_path = 
"bayes_factor_intuition_chart_def.json" bayes_factor_intuition_chart_def = load_chart_definition(chart_path) data = _get_bayes_factors(row_dict, model) # Get initial and final bayes factors lam = model.current_settings_obj["proportion_of_matches"] bf_init = lam/(1-lam) bf_final = sum([d['log2_bayes_factor'] for d in data]) + log2(bf_init) # Sort records in descending order of influence # with start and end positions added df = pd.DataFrame(data)\ .sort_values(by="log2_bayes_factor", key=abs, ascending=False)\ .reset_index(drop=True)\ .append({ 'bayes_factor': 2**bf_final, 'log2_bayes_factor': bf_final, 'column_name': 'Final score' }, ignore_index=True ) df = pd.DataFrame({ 'bayes_factor': bf_init, 'log2_bayes_factor': log2(bf_init), 'column_name': 'Prior lambda' }, index=[0] ).append(df, ignore_index=True).reset_index() bayes_factor_intuition_chart_def["data"]["values"] = df.to_dict('records') return altair_if_installed_else_json(bayes_factor_intuition_chart_def)
37.478571
203
0.692014
from .model import Model from .charts import load_chart_definition, altair_if_installed_else_json import pandas as pd from math import log2 initial_template = """ Initial probability of match (prior) = λ = {lam:.4g} """ col_template = [ ("Comparison of {column_name}. Values are:", ""), ("{column_name}_l:", "{value_l}"), ("{column_name}_r:", "{value_r}"), ("Comparison has:", "{num_levels} levels"), ("Level for this comparison:", "{gamma_column_name} = {gamma_index}"), ("m probability = P(level|match):", "{m_probability:.4g}"), ("u probability = P(level|non-match):", "{u_probability:.4g}"), ("Bayes factor = m/u:", "{bayes_factor:.4g}"), ("New probability of match (updated belief):", "{updated_belief:.4g}"), ] end_template = """ Final probability of match = {final:.4g} Reminder: The m probability for a given level is the proportion of matches which are in this level. We would generally expect the highest similarity level to have the largest proportion of matches. For example, we would expect first name field to match exactly amongst most matching records, except where nicknames, aliases or typos have occurred. For a comparison column that changes through time, like address, we may expect a lower proportion of comparisons to be in the highest similarity level. The u probability for a given level is the proportion of non-matches which are in this level. We would generally expect the lowest similarity level to have the highest proportion of non-matches, but the magnitude depends on the cardinality of the field. For example, we would expect that in the vast majority of non-matching records, the date of birth field would not match. However, we would expect it to be common for gender to match amongst non-matches. 
""" def intuition_report(row_dict: dict, model: Model): lam = model.current_settings_obj["proportion_of_matches"] report = initial_template.format(lam=lam) current_prob = lam for cc in model.current_settings_obj.comparison_columns_list: d = cc.describe_row_dict(row_dict) bf = d["bayes_factor"] a = bf * current_prob new_p = a / (a + (1 - current_prob)) d["updated_belief"] = new_p current_prob = new_p col_report = [] col_report.append("------") for (blurb, value) in col_template: blurb_fmt = blurb.format(**d) value_fmt = value.format(**d) col_report.append(f"{blurb_fmt:<50} {value_fmt}") col_report.append("\n") col_report = "\n".join(col_report) report += col_report report += end_template.format(final=new_p) if len(model.current_settings_obj["blocking_rules"]) > 1: match_key = int(row_dict["match_key"]) br = model.current_settings_obj["blocking_rules"][match_key] br = f"\nThis comparison was generated by the blocking rule: {br}" report += br return report def _get_bayes_factors(row_dict, model): bayes_factors = [] lam = model.current_settings_obj["proportion_of_matches"] for cc in model.current_settings_obj.comparison_columns_list: row_desc = cc.describe_row_dict(row_dict, lam) bayes_factors.append(row_desc) return bayes_factors def bayes_factor_chart(row_dict, model): chart_path = "bayes_factor_chart_def.json" bayes_factor_chart_def = load_chart_definition(chart_path) bayes_factor_chart_def["data"]["values"] = _get_bayes_factors(row_dict, model) bayes_factor_chart_def["encoding"]["y"]["field"] = "column_name" del bayes_factor_chart_def["encoding"]["row"] return altair_if_installed_else_json(bayes_factor_chart_def) def bayes_factor_intuition_chart(row_dict, model): chart_path = "bayes_factor_intuition_chart_def.json" bayes_factor_intuition_chart_def = load_chart_definition(chart_path) data = _get_bayes_factors(row_dict, model) lam = model.current_settings_obj["proportion_of_matches"] bf_init = lam/(1-lam) bf_final = sum([d['log2_bayes_factor'] for d in data]) + 
log2(bf_init) df = pd.DataFrame(data)\ .sort_values(by="log2_bayes_factor", key=abs, ascending=False)\ .reset_index(drop=True)\ .append({ 'bayes_factor': 2**bf_final, 'log2_bayes_factor': bf_final, 'column_name': 'Final score' }, ignore_index=True ) df = pd.DataFrame({ 'bayes_factor': bf_init, 'log2_bayes_factor': log2(bf_init), 'column_name': 'Prior lambda' }, index=[0] ).append(df, ignore_index=True).reset_index() bayes_factor_intuition_chart_def["data"]["values"] = df.to_dict('records') return altair_if_installed_else_json(bayes_factor_intuition_chart_def)
true
true
1c45dd33925b9b9be524163ed7fb322778cad0d2
1,395
py
Python
graph_scripts/identification_graph.py
karannewatia/Mycelium
c20deab29d97025d7623af4bbf97f79f3132b415
[ "MIT", "BSD-2-Clause", "BSD-3-Clause" ]
3
2022-01-19T18:14:42.000Z
2022-02-07T19:16:17.000Z
graph_scripts/identification_graph.py
karannewatia/Mycelium
c20deab29d97025d7623af4bbf97f79f3132b415
[ "MIT", "BSD-2-Clause", "BSD-3-Clause" ]
null
null
null
graph_scripts/identification_graph.py
karannewatia/Mycelium
c20deab29d97025d7623af4bbf97f79f3132b415
[ "MIT", "BSD-2-Clause", "BSD-3-Clause" ]
null
null
null
import numpy as np import matplotlib.pyplot as plt plt.rcParams['pdf.fonttype'] = 42 plt.rcParams['ps.fonttype'] = 42 malice_vals = [0.005, 0.01, 0.02, 0.04] ind = np.arange(4) #replace these with the data obtained from identification.py k2r1 = [0.0, 0.0, 0.0008000000000000229, 0.0015999999999999348] k2r2 = [0.0, 0.0, 0.0008000000000000229, 0.0033000000000004137] k2r3 = [0.0, 0.000200000000000089, 0.001200000000000201, 0.0049000000000002375] k3r1 = [0.0, 0.0, 0.0, 0.0] k3r2 = [0.0, 0.0, 0.0, 9.999999999987796e-05] k3r3 = [0.0, 0.0, 0.0, 0.000200000000000089] font = {'size' : 17} plt.rc('font', **font) plt.gcf().subplots_adjust(bottom=0.15) plt.gcf().subplots_adjust(left=0.20) plt.plot(ind, k2r1, label = "k=2,r=1", marker="X", markersize=10, linewidth=5) plt.plot(ind, k2r2, label = "k=2,r=2", marker="X", markersize=10, linewidth=5) plt.plot(ind, k2r3, label = "k=2,r=3", marker="X", markersize=10, linewidth=5) plt.plot(ind, k3r1, label = "k=3,r=1", marker="X", markersize=10, linewidth=5) plt.plot(ind, k3r2, label = "k=3,r=2", marker="X", markersize=10, linewidth=5) plt.plot(ind, k3r3, label = "k=3,r=3", marker="X", markersize=10, linewidth=5) plt.xticks(ind, ('0.5', '1', '2', '4')) plt.xlabel('Malice rate (%)', fontsize='large') plt.ylabel('Probability of identification', fontsize='large') plt.legend() plt.savefig('../new_graphs/Identification.pdf', format='pdf')
36.710526
79
0.682437
import numpy as np import matplotlib.pyplot as plt plt.rcParams['pdf.fonttype'] = 42 plt.rcParams['ps.fonttype'] = 42 malice_vals = [0.005, 0.01, 0.02, 0.04] ind = np.arange(4) k2r1 = [0.0, 0.0, 0.0008000000000000229, 0.0015999999999999348] k2r2 = [0.0, 0.0, 0.0008000000000000229, 0.0033000000000004137] k2r3 = [0.0, 0.000200000000000089, 0.001200000000000201, 0.0049000000000002375] k3r1 = [0.0, 0.0, 0.0, 0.0] k3r2 = [0.0, 0.0, 0.0, 9.999999999987796e-05] k3r3 = [0.0, 0.0, 0.0, 0.000200000000000089] font = {'size' : 17} plt.rc('font', **font) plt.gcf().subplots_adjust(bottom=0.15) plt.gcf().subplots_adjust(left=0.20) plt.plot(ind, k2r1, label = "k=2,r=1", marker="X", markersize=10, linewidth=5) plt.plot(ind, k2r2, label = "k=2,r=2", marker="X", markersize=10, linewidth=5) plt.plot(ind, k2r3, label = "k=2,r=3", marker="X", markersize=10, linewidth=5) plt.plot(ind, k3r1, label = "k=3,r=1", marker="X", markersize=10, linewidth=5) plt.plot(ind, k3r2, label = "k=3,r=2", marker="X", markersize=10, linewidth=5) plt.plot(ind, k3r3, label = "k=3,r=3", marker="X", markersize=10, linewidth=5) plt.xticks(ind, ('0.5', '1', '2', '4')) plt.xlabel('Malice rate (%)', fontsize='large') plt.ylabel('Probability of identification', fontsize='large') plt.legend() plt.savefig('../new_graphs/Identification.pdf', format='pdf')
true
true
1c45dd47cd5d01f117d4d2dabd7d739958d96331
1,591
py
Python
setup.py
arpitban/integrate
c991a50546229c2341ad5d8571c72c819c06c4b2
[ "MIT" ]
null
null
null
setup.py
arpitban/integrate
c991a50546229c2341ad5d8571c72c819c06c4b2
[ "MIT" ]
null
null
null
setup.py
arpitban/integrate
c991a50546229c2341ad5d8571c72c819c06c4b2
[ "MIT" ]
null
null
null
""" integrate Package to integrate functions """ from setuptools import setup import versioneer DOCLINES = __doc__.split("\n") setup( # Self-descriptive entries which should always be present name='integrate', author='Arpit Bansal', description=DOCLINES[0], long_description="\n".join(DOCLINES[2:]), version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), license='MIT', install_requires=[ 'numpy', ], # Which Python importable modules should be included when your package is installed packages=['integrate', "integrate.tests"], # Optional include package data to ship with your package # Comment out this line to prevent the files from being packaged with your software # Extend/modify the list to include/exclude other items as need be package_data={'integrate': ["data/*.dat"] }, # Additional entries you may want simply uncomment the lines you want and fill in the data # author_email='me@place.org', # Author email # url='http://www.my_package.com', # Website # install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment # platforms=['Linux', # 'Mac OS-X', # 'Unix', # 'Windows'], # Valid platforms your code works on, adjust to your flavor # python_requires=">=3.5", # Python version restrictions # Manual control if final package is compressible or not, set False to prevent the .egg from being made # zip_safe=False, )
34.586957
118
0.657448
from setuptools import setup import versioneer DOCLINES = __doc__.split("\n") setup( name='integrate', author='Arpit Bansal', description=DOCLINES[0], long_description="\n".join(DOCLINES[2:]), version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), license='MIT', install_requires=[ 'numpy', ], packages=['integrate', "integrate.tests"], package_data={'integrate': ["data/*.dat"] },
true
true
1c45dd9a6f5f623f74d785fbdbad56a08b56d3f5
2,746
py
Python
xknx/remote_value/remote_value_control.py
magicbear/xknx
e6fe7bbd292e0fee29b2c4f210aff3031d76539d
[ "MIT" ]
1
2021-01-24T21:08:36.000Z
2021-01-24T21:08:36.000Z
xknx/remote_value/remote_value_control.py
magicbear/xknx
e6fe7bbd292e0fee29b2c4f210aff3031d76539d
[ "MIT" ]
54
2021-10-01T17:42:16.000Z
2022-03-31T09:22:46.000Z
xknx/remote_value/remote_value_control.py
crazyfx1/xknx
87666cc9bd9da64a84305baeff84486097346111
[ "MIT" ]
null
null
null
""" Module for managing a control remote value. Examples are switching commands with priority control, relative dimming or blinds control commands. DPT 2.yyy and DPT 3.yyy """ from __future__ import annotations from typing import TYPE_CHECKING, Any from xknx.dpt import DPTArray, DPTBinary, DPTControlStepCode from xknx.exceptions import ConversionError from .remote_value import AsyncCallbackType, GroupAddressesType, RemoteValue if TYPE_CHECKING: from xknx.xknx import XKNX class RemoteValueControl(RemoteValue[DPTBinary, Any]): """Abstraction for remote value used for controling.""" def __init__( self, xknx: XKNX, group_address: GroupAddressesType | None = None, group_address_state: GroupAddressesType | None = None, sync_state: bool | int | float | str = True, value_type: str | None = None, device_name: str | None = None, feature_name: str = "Control", after_update_cb: AsyncCallbackType | None = None, ): """Initialize control remote value.""" if value_type is None: raise ConversionError("no value type given", device_name=device_name) _dpt_class = DPTControlStepCode.parse_transcoder(value_type) if _dpt_class is None: raise ConversionError( "invalid value type", value_type=value_type, device_name=device_name ) self.dpt_class: type[DPTControlStepCode] = _dpt_class super().__init__( xknx, group_address, group_address_state, sync_state=sync_state, device_name=device_name, feature_name=feature_name, after_update_cb=after_update_cb, ) def payload_valid(self, payload: DPTArray | DPTBinary | None) -> DPTBinary | None: """Test if telegram payload may be parsed.""" # pylint: disable=no-self-use return payload if isinstance(payload, DPTBinary) else None def to_knx(self, value: Any) -> DPTBinary: """Convert value to payload.""" return DPTBinary(self.dpt_class.to_knx(value)) def from_knx(self, payload: DPTBinary) -> Any: """Convert current payload to value.""" # TODO: DPTBinary.value is int - DPTBase.from_knx requires Tuple[int, ...] 
- maybe use bytes return self.dpt_class.from_knx((payload.value,)) @property def unit_of_measurement(self) -> str | None: """Return the unit of measurement.""" return self.dpt_class.unit @property def ha_device_class(self) -> str | None: """Return a string representing the home assistant device class.""" return getattr(self.dpt_class, "ha_device_class", None) # type: ignore
36.131579
100
0.664967
from __future__ import annotations from typing import TYPE_CHECKING, Any from xknx.dpt import DPTArray, DPTBinary, DPTControlStepCode from xknx.exceptions import ConversionError from .remote_value import AsyncCallbackType, GroupAddressesType, RemoteValue if TYPE_CHECKING: from xknx.xknx import XKNX class RemoteValueControl(RemoteValue[DPTBinary, Any]): def __init__( self, xknx: XKNX, group_address: GroupAddressesType | None = None, group_address_state: GroupAddressesType | None = None, sync_state: bool | int | float | str = True, value_type: str | None = None, device_name: str | None = None, feature_name: str = "Control", after_update_cb: AsyncCallbackType | None = None, ): if value_type is None: raise ConversionError("no value type given", device_name=device_name) _dpt_class = DPTControlStepCode.parse_transcoder(value_type) if _dpt_class is None: raise ConversionError( "invalid value type", value_type=value_type, device_name=device_name ) self.dpt_class: type[DPTControlStepCode] = _dpt_class super().__init__( xknx, group_address, group_address_state, sync_state=sync_state, device_name=device_name, feature_name=feature_name, after_update_cb=after_update_cb, ) def payload_valid(self, payload: DPTArray | DPTBinary | None) -> DPTBinary | None: return payload if isinstance(payload, DPTBinary) else None def to_knx(self, value: Any) -> DPTBinary: return DPTBinary(self.dpt_class.to_knx(value)) def from_knx(self, payload: DPTBinary) -> Any: return self.dpt_class.from_knx((payload.value,)) @property def unit_of_measurement(self) -> str | None: return self.dpt_class.unit @property def ha_device_class(self) -> str | None: return getattr(self.dpt_class, "ha_device_class", None)
true
true
1c45dde6f471e6c02e753f85eac93a9d22fbde55
3,277
py
Python
sphinx/pocketsphinx-5prealpha/swig/python/test/decoder_test.py
anshsarkar/TailBench
25845756aee9a892229c25b681051591c94daafd
[ "MIT" ]
2
2021-01-13T21:17:42.000Z
2021-01-13T21:17:42.000Z
sphinx/pocketsphinx-5prealpha/swig/python/test/decoder_test.py
anshsarkar/TailBench
25845756aee9a892229c25b681051591c94daafd
[ "MIT" ]
null
null
null
sphinx/pocketsphinx-5prealpha/swig/python/test/decoder_test.py
anshsarkar/TailBench
25845756aee9a892229c25b681051591c94daafd
[ "MIT" ]
null
null
null
# ==================================================================== # Copyright (c) 2013 Carnegie Mellon University. All rights # reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # This work was supported in part by funding from the Defense Advanced # Research Projects Agency and the National Science Foundation of the # United States of America, and the CMU Sphinx Speech Consortium. # # THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND # ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY # NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # ==================================================================== from os import environ, path from pocketsphinx.pocketsphinx import * from sphinxbase.sphinxbase import * MODELDIR = "../../../model" DATADIR = "../../../test/data" # Create a decoder with certain model config = Decoder.default_config() config.set_string('-hmm', path.join(MODELDIR, 'en-us/en-us')) config.set_string('-lm', path.join(MODELDIR, 'en-us/en-us.lm.bin')) config.set_string('-dict', path.join(MODELDIR, 'en-us/cmudict-en-us.dict')) # Decode streaming data. decoder = Decoder(config) print ("Pronunciation for word 'hello' is ", decoder.lookup_word("hello")) print ("Pronunciation for word 'abcdf' is ", decoder.lookup_word("abcdf")) decoder.start_utt() stream = open(path.join(DATADIR, 'goforward.raw'), 'rb') while True: buf = stream.read(1024) if buf: decoder.process_raw(buf, False, False) else: break decoder.end_utt() hypothesis = decoder.hyp() print ('Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob) print ('Best hypothesis segments: ', [seg.word for seg in decoder.seg()]) # Access N best decodings. print ('Best 10 hypothesis: ') for best, i in zip(decoder.nbest(), range(10)): print (best.hypstr, best.score) stream = open(path.join(DATADIR, 'goforward.mfc'), 'rb') stream.read(4) buf = stream.read(13780) decoder.start_utt() decoder.process_cep(buf, False, True) decoder.end_utt() hypothesis = decoder.hyp() print ('Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob)
38.552941
121
0.712237
from os import environ, path from pocketsphinx.pocketsphinx import * from sphinxbase.sphinxbase import * MODELDIR = "../../../model" DATADIR = "../../../test/data" config = Decoder.default_config() config.set_string('-hmm', path.join(MODELDIR, 'en-us/en-us')) config.set_string('-lm', path.join(MODELDIR, 'en-us/en-us.lm.bin')) config.set_string('-dict', path.join(MODELDIR, 'en-us/cmudict-en-us.dict')) decoder = Decoder(config) print ("Pronunciation for word 'hello' is ", decoder.lookup_word("hello")) print ("Pronunciation for word 'abcdf' is ", decoder.lookup_word("abcdf")) decoder.start_utt() stream = open(path.join(DATADIR, 'goforward.raw'), 'rb') while True: buf = stream.read(1024) if buf: decoder.process_raw(buf, False, False) else: break decoder.end_utt() hypothesis = decoder.hyp() print ('Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob) print ('Best hypothesis segments: ', [seg.word for seg in decoder.seg()]) print ('Best 10 hypothesis: ') for best, i in zip(decoder.nbest(), range(10)): print (best.hypstr, best.score) stream = open(path.join(DATADIR, 'goforward.mfc'), 'rb') stream.read(4) buf = stream.read(13780) decoder.start_utt() decoder.process_cep(buf, False, True) decoder.end_utt() hypothesis = decoder.hyp() print ('Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob)
true
true
1c45ddeda1ba48e329cee4025e6983697c5c5850
512
py
Python
scipy/sparse/linalg/setup.py
lorentzenchr/scipy
393a05ee927883ad6316b7092c851afea8f16816
[ "BSD-3-Clause" ]
9,095
2015-01-02T18:24:23.000Z
2022-03-31T20:35:31.000Z
scipy/sparse/linalg/setup.py
lorentzenchr/scipy
393a05ee927883ad6316b7092c851afea8f16816
[ "BSD-3-Clause" ]
11,500
2015-01-01T01:15:30.000Z
2022-03-31T23:07:35.000Z
scipy/sparse/linalg/setup.py
lorentzenchr/scipy
393a05ee927883ad6316b7092c851afea8f16816
[ "BSD-3-Clause" ]
5,838
2015-01-05T11:56:42.000Z
2022-03-31T23:21:19.000Z
def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('linalg', parent_package, top_path) config.add_subpackage('_isolve') config.add_subpackage('_dsolve') config.add_subpackage('_eigen') config.add_data_dir('tests') # PROPACK config.add_subpackage('_propack') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
23.272727
62
0.71875
def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('linalg', parent_package, top_path) config.add_subpackage('_isolve') config.add_subpackage('_dsolve') config.add_subpackage('_eigen') config.add_data_dir('tests') config.add_subpackage('_propack') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
true
true
1c45de15d7b5493f204a2593fa32d8e68e6eccbf
612
py
Python
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/GREMEDY/string_marker.py
JE-Chen/je_old_repo
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
[ "MIT" ]
null
null
null
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/GREMEDY/string_marker.py
JE-Chen/je_old_repo
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
[ "MIT" ]
null
null
null
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/GREMEDY/string_marker.py
JE-Chen/je_old_repo
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
[ "MIT" ]
null
null
null
'''Autogenerated by xml_generate script, do not edit!''' from OpenGL import platform as _p, arrays # Code generation uses this from OpenGL.raw.GL import _types as _cs # End users want this... from OpenGL.raw.GL._types import * from OpenGL.raw.GL import _errors from OpenGL.constant import Constant as _C import ctypes _EXTENSION_NAME = 'GL_GREMEDY_string_marker' def _f( function ): return _p.createFunction( function,_p.PLATFORM.GL,'GL_GREMEDY_string_marker',error_checker=_errors._error_checker) @_f @_p.types(None,_cs.GLsizei,ctypes.c_void_p) def glStringMarkerGREMEDY(len,string):pass
34
119
0.776144
from OpenGL import platform as _p, arrays from OpenGL.raw.GL import _types as _cs from OpenGL.raw.GL._types import * from OpenGL.raw.GL import _errors from OpenGL.constant import Constant as _C import ctypes _EXTENSION_NAME = 'GL_GREMEDY_string_marker' def _f( function ): return _p.createFunction( function,_p.PLATFORM.GL,'GL_GREMEDY_string_marker',error_checker=_errors._error_checker) @_f @_p.types(None,_cs.GLsizei,ctypes.c_void_p) def glStringMarkerGREMEDY(len,string):pass
true
true
1c45decc0c487ca311ecba9f0e1ff22edc6b5a52
10,437
bzl
Python
tools/build_defs/repo/git.bzl
FengRillian/bazel
c962975f152e30741a3affb1d41dd885543bbea6
[ "Apache-2.0" ]
3
2019-03-18T23:49:16.000Z
2021-05-30T09:44:18.000Z
tools/build_defs/repo/git.bzl
FengRillian/bazel
c962975f152e30741a3affb1d41dd885543bbea6
[ "Apache-2.0" ]
null
null
null
tools/build_defs/repo/git.bzl
FengRillian/bazel
c962975f152e30741a3affb1d41dd885543bbea6
[ "Apache-2.0" ]
null
null
null
# Copyright 2015 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rules for cloning external git repositories.""" load("@bazel_tools//tools/build_defs/repo:utils.bzl", "patch", "update_attrs", "workspace_and_buildfile") def _clone_or_update(ctx): if ((not ctx.attr.tag and not ctx.attr.commit and not ctx.attr.branch) or (ctx.attr.tag and ctx.attr.commit) or (ctx.attr.tag and ctx.attr.branch) or (ctx.attr.commit and ctx.attr.branch)): fail("Exactly one of commit, tag, or branch must be provided") shallow = "" if ctx.attr.commit: ref = ctx.attr.commit elif ctx.attr.tag: ref = "tags/" + ctx.attr.tag shallow = "--depth=1" else: ref = ctx.attr.branch shallow = "--depth=1" directory = str(ctx.path(".")) if ctx.attr.strip_prefix: directory = directory + "-tmp" if ctx.attr.shallow_since: if ctx.attr.tag: fail("shallow_since not allowed if a tag is specified; --depth=1 will be used for tags") if ctx.attr.branch: fail("shallow_since not allowed if a branch is specified; --depth=1 will be used for branches") shallow = "--shallow-since=%s" % ctx.attr.shallow_since ctx.report_progress("Cloning %s of %s" % (ref, ctx.attr.remote)) if (ctx.attr.verbose): print("git.bzl: Cloning or updating %s repository %s using strip_prefix of [%s]" % ( " (%s)" % shallow if shallow else "", ctx.name, ctx.attr.strip_prefix if ctx.attr.strip_prefix else "None", )) bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash" st = ctx.execute([bash_exe, 
"-c", """ cd {working_dir} set -ex ( cd {working_dir} && if ! ( cd '{dir_link}' && [[ "$(git rev-parse --git-dir)" == '.git' ]] ) >/dev/null 2>&1; then rm -rf '{directory}' '{dir_link}' git clone '{shallow}' '{remote}' '{directory}' || git clone '{remote}' '{directory}' fi git -C '{directory}' reset --hard {ref} || \ ((git -C '{directory}' fetch '{shallow}' origin {ref}:{ref} || \ git -C '{directory}' fetch origin {ref}:{ref}) && git -C '{directory}' reset --hard {ref}) git -C '{directory}' clean -xdf ) """.format( working_dir = ctx.path(".").dirname, dir_link = ctx.path("."), directory = directory, remote = ctx.attr.remote, ref = ref, shallow = shallow, )], environment = ctx.os.environ) if st.return_code: fail("error cloning %s:\n%s" % (ctx.name, st.stderr)) if ctx.attr.strip_prefix: dest_link = "{}/{}".format(directory, ctx.attr.strip_prefix) if not ctx.path(dest_link).exists: fail("strip_prefix at {} does not exist in repo".format(ctx.attr.strip_prefix)) ctx.symlink(dest_link, ctx.path(".")) if ctx.attr.init_submodules: ctx.report_progress("Updating submodules") st = ctx.execute([bash_exe, "-c", """ set -ex ( git -C '{directory}' submodule update --init --checkout --force ) """.format( directory = ctx.path("."), )], environment = ctx.os.environ) if st.return_code: fail("error updating submodules %s:\n%s" % (ctx.name, st.stderr)) ctx.report_progress("Recording actual commit") # After the fact, determine the actual commit and its date actual_commit = ctx.execute([ bash_exe, "-c", "(git -C '{directory}' log -n 1 --pretty='format:%H')".format( directory = ctx.path("."), ), ]).stdout shallow_date = ctx.execute([ bash_exe, "-c", "(git -C '{directory}' log -n 1 --pretty='format:%cd' --date=raw)".format( directory = ctx.path("."), ), ]).stdout return {"commit": actual_commit, "shallow_since": shallow_date} def _remove_dot_git(ctx): # Remove the .git directory, if present bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash" ctx.execute([ 
bash_exe, "-c", "rm -rf '{directory}'".format(directory = ctx.path(".git")), ]) def _update_git_attrs(orig, keys, override): result = update_attrs(orig, keys, override) # if we found the actual commit, remove all other means of specifying it, # like tag or branch. if "commit" in result: result.pop("tag", None) result.pop("branch", None) return result _common_attrs = { "remote": attr.string(mandatory = True), "commit": attr.string(default = ""), "shallow_since": attr.string(default = ""), "tag": attr.string(default = ""), "branch": attr.string(default = ""), "init_submodules": attr.bool(default = False), "verbose": attr.bool(default = False), "strip_prefix": attr.string(default = ""), "patches": attr.label_list(default = []), "patch_tool": attr.string(default = "patch"), "patch_args": attr.string_list(default = ["-p0"]), "patch_cmds": attr.string_list(default = []), } _new_git_repository_attrs = dict(_common_attrs.items() + { "build_file": attr.label(allow_single_file = True), "build_file_content": attr.string(), "workspace_file": attr.label(), "workspace_file_content": attr.string(), }.items()) def _new_git_repository_implementation(ctx): if ((not ctx.attr.build_file and not ctx.attr.build_file_content) or (ctx.attr.build_file and ctx.attr.build_file_content)): fail("Exactly one of build_file and build_file_content must be provided.") update = _clone_or_update(ctx) workspace_and_buildfile(ctx) patch(ctx) _remove_dot_git(ctx) return _update_git_attrs(ctx.attr, _new_git_repository_attrs.keys(), update) def _git_repository_implementation(ctx): update = _clone_or_update(ctx) patch(ctx) _remove_dot_git(ctx) return _update_git_attrs(ctx.attr, _common_attrs.keys(), update) new_git_repository = repository_rule( implementation = _new_git_repository_implementation, attrs = _new_git_repository_attrs, ) """Clone an external git repository. Clones a Git repository, checks out the specified tag, or commit, and makes its targets available for binding. 
Also determine the id of the commit actually checked out and its date, and return a dict with parameters that provide a reproducible version of this rule (which a tag not necessarily is). Args: name: A unique name for this repository. build_file: The file to use as the BUILD file for this repository. Either build_file or build_file_content must be specified. This attribute is an absolute label (use '@//' for the main repo). The file does not need to be named BUILD, but can be (something like BUILD.new-repo-name may work well for distinguishing it from the repository's actual BUILD files. build_file_content: The content for the BUILD file for this repository. Either build_file or build_file_content must be specified. workspace_file: The file to use as the `WORKSPACE` file for this repository. Either `workspace_file` or `workspace_file_content` can be specified, or neither, but not both. workspace_file_content: The content for the WORKSPACE file for this repository. Either `workspace_file` or `workspace_file_content` can be specified, or neither, but not both. branch: branch in the remote repository to checked out tag: tag in the remote repository to checked out commit: specific commit to be checked out Precisely one of branch, tag, or commit must be specified. shallow_since: an optional date, not after the specified commit; the argument is not allowed if a tag is specified (which allows cloning with depth 1). Setting such a date close to the specified commit allows for a more shallow clone of the repository, saving bandwidth and wall-clock time. init_submodules: Whether to clone submodules in the repository. remote: The URI of the remote Git repository. strip_prefix: A directory prefix to strip from the extracted files. patches: A list of files that are to be applied as patches after extracting the archive. patch_tool: the patch(1) utility to use. 
patch_args: arguments given to the patch tool, defaults to ["-p0"] patch_cmds: sequence of commands to be applied after patches are applied. """ git_repository = repository_rule( implementation = _git_repository_implementation, attrs = _common_attrs, ) """Clone an external git repository. Clones a Git repository, checks out the specified tag, or commit, and makes its targets available for binding. Also determine the id of the commit actually checked out and its date, and return a dict with parameters that provide a reproducible version of this rule (which a tag not necessarily is). Args: name: A unique name for this repository. init_submodules: Whether to clone submodules in the repository. remote: The URI of the remote Git repository. branch: branch in the remote repository to checked out tag: tag in the remote repository to checked out commit: specific commit to be checked out Precisely one of branch, tag, or commit must be specified. shallow_since: an optional date in the form YYYY-MM-DD, not after the specified commit; the argument is not allowed if a tag is specified (which allows cloning with depth 1). Setting such a date close to the specified commit allows for a more shallow clone of the repository, saving bandwidth and wall-clock time. strip_prefix: A directory prefix to strip from the extracted files. patches: A list of files that are to be applied as patches after extracting the archive. patch_tool: the patch(1) utility to use. patch_args: arguments given to the patch tool, defaults to ["-p0"] patch_cmds: sequence of commands to be applied after patches are applied. """
38.655556
107
0.678547
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "patch", "update_attrs", "workspace_and_buildfile") def _clone_or_update(ctx): if ((not ctx.attr.tag and not ctx.attr.commit and not ctx.attr.branch) or (ctx.attr.tag and ctx.attr.commit) or (ctx.attr.tag and ctx.attr.branch) or (ctx.attr.commit and ctx.attr.branch)): fail("Exactly one of commit, tag, or branch must be provided") shallow = "" if ctx.attr.commit: ref = ctx.attr.commit elif ctx.attr.tag: ref = "tags/" + ctx.attr.tag shallow = "--depth=1" else: ref = ctx.attr.branch shallow = "--depth=1" directory = str(ctx.path(".")) if ctx.attr.strip_prefix: directory = directory + "-tmp" if ctx.attr.shallow_since: if ctx.attr.tag: fail("shallow_since not allowed if a tag is specified; --depth=1 will be used for tags") if ctx.attr.branch: fail("shallow_since not allowed if a branch is specified; --depth=1 will be used for branches") shallow = "--shallow-since=%s" % ctx.attr.shallow_since ctx.report_progress("Cloning %s of %s" % (ref, ctx.attr.remote)) if (ctx.attr.verbose): print("git.bzl: Cloning or updating %s repository %s using strip_prefix of [%s]" % ( " (%s)" % shallow if shallow else "", ctx.name, ctx.attr.strip_prefix if ctx.attr.strip_prefix else "None", )) bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash" st = ctx.execute([bash_exe, "-c", """ cd {working_dir} set -ex ( cd {working_dir} && if ! 
( cd '{dir_link}' && [[ "$(git rev-parse --git-dir)" == '.git' ]] ) >/dev/null 2>&1; then rm -rf '{directory}' '{dir_link}' git clone '{shallow}' '{remote}' '{directory}' || git clone '{remote}' '{directory}' fi git -C '{directory}' reset --hard {ref} || \ ((git -C '{directory}' fetch '{shallow}' origin {ref}:{ref} || \ git -C '{directory}' fetch origin {ref}:{ref}) && git -C '{directory}' reset --hard {ref}) git -C '{directory}' clean -xdf ) """.format( working_dir = ctx.path(".").dirname, dir_link = ctx.path("."), directory = directory, remote = ctx.attr.remote, ref = ref, shallow = shallow, )], environment = ctx.os.environ) if st.return_code: fail("error cloning %s:\n%s" % (ctx.name, st.stderr)) if ctx.attr.strip_prefix: dest_link = "{}/{}".format(directory, ctx.attr.strip_prefix) if not ctx.path(dest_link).exists: fail("strip_prefix at {} does not exist in repo".format(ctx.attr.strip_prefix)) ctx.symlink(dest_link, ctx.path(".")) if ctx.attr.init_submodules: ctx.report_progress("Updating submodules") st = ctx.execute([bash_exe, "-c", """ set -ex ( git -C '{directory}' submodule update --init --checkout --force ) """.format( directory = ctx.path("."), )], environment = ctx.os.environ) if st.return_code: fail("error updating submodules %s:\n%s" % (ctx.name, st.stderr)) ctx.report_progress("Recording actual commit") actual_commit = ctx.execute([ bash_exe, "-c", "(git -C '{directory}' log -n 1 --pretty='format:%H')".format( directory = ctx.path("."), ), ]).stdout shallow_date = ctx.execute([ bash_exe, "-c", "(git -C '{directory}' log -n 1 --pretty='format:%cd' --date=raw)".format( directory = ctx.path("."), ), ]).stdout return {"commit": actual_commit, "shallow_since": shallow_date} def _remove_dot_git(ctx): bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash" ctx.execute([ bash_exe, "-c", "rm -rf '{directory}'".format(directory = ctx.path(".git")), ]) def _update_git_attrs(orig, keys, override): result = update_attrs(orig, keys, 
override) if "commit" in result: result.pop("tag", None) result.pop("branch", None) return result _common_attrs = { "remote": attr.string(mandatory = True), "commit": attr.string(default = ""), "shallow_since": attr.string(default = ""), "tag": attr.string(default = ""), "branch": attr.string(default = ""), "init_submodules": attr.bool(default = False), "verbose": attr.bool(default = False), "strip_prefix": attr.string(default = ""), "patches": attr.label_list(default = []), "patch_tool": attr.string(default = "patch"), "patch_args": attr.string_list(default = ["-p0"]), "patch_cmds": attr.string_list(default = []), } _new_git_repository_attrs = dict(_common_attrs.items() + { "build_file": attr.label(allow_single_file = True), "build_file_content": attr.string(), "workspace_file": attr.label(), "workspace_file_content": attr.string(), }.items()) def _new_git_repository_implementation(ctx): if ((not ctx.attr.build_file and not ctx.attr.build_file_content) or (ctx.attr.build_file and ctx.attr.build_file_content)): fail("Exactly one of build_file and build_file_content must be provided.") update = _clone_or_update(ctx) workspace_and_buildfile(ctx) patch(ctx) _remove_dot_git(ctx) return _update_git_attrs(ctx.attr, _new_git_repository_attrs.keys(), update) def _git_repository_implementation(ctx): update = _clone_or_update(ctx) patch(ctx) _remove_dot_git(ctx) return _update_git_attrs(ctx.attr, _common_attrs.keys(), update) new_git_repository = repository_rule( implementation = _new_git_repository_implementation, attrs = _new_git_repository_attrs, ) git_repository = repository_rule( implementation = _git_repository_implementation, attrs = _common_attrs, )
true
true
1c45dee6a500635eabaad87541b2131f941e9a46
69,722
py
Python
kgtk/io/kgtkreader.py
dgarijo/kgtk
f624754e91afbad8d28006e716189b43d367ef04
[ "MIT" ]
null
null
null
kgtk/io/kgtkreader.py
dgarijo/kgtk
f624754e91afbad8d28006e716189b43d367ef04
[ "MIT" ]
null
null
null
kgtk/io/kgtkreader.py
dgarijo/kgtk
f624754e91afbad8d28006e716189b43d367ef04
[ "MIT" ]
null
null
null
"""Read a KGTK node or edge file in TSV format. Normally, results are obtained as rows of string values obtained by iteration on the KgtkReader object. Alternative iterators are available to return the results as: * concise_rows: lists of strings with empty fields converted to None * kgtk_values: lists of KgtkValue objects * concise_kgtk_values: lists of KgtkValue objects with empty fields converted to None * dicts: dicts of strings * dicts(concise=True): dicts of strings with empty fields omitted * kgtk_value_dicts: dicts of KgtkValue objects * kgtk_value_dicts(concise=True): dicts of KgtkValue objects with empty fields omitted TODO: Add support for alternative envelope formats, such as JSON. """ from argparse import ArgumentParser, _ArgumentGroup, Namespace, SUPPRESS import attr import bz2 from enum import Enum import gzip import lz4 # type: ignore import lzma from multiprocessing import Process, Queue from pathlib import Path import sys import typing from kgtk.kgtkformat import KgtkFormat from kgtk.io.kgtkbase import KgtkBase from kgtk.utils.argparsehelpers import optional_bool from kgtk.utils.closableiter import ClosableIter, ClosableIterTextIOWrapper from kgtk.utils.enumnameaction import EnumNameAction from kgtk.utils.gzipprocess import GunzipProcess from kgtk.utils.validationaction import ValidationAction from kgtk.value.kgtkvalue import KgtkValue from kgtk.value.kgtkvalueoptions import KgtkValueOptions, DEFAULT_KGTK_VALUE_OPTIONS class KgtkReaderMode(Enum): """ There are four file reading modes: """ NONE = 0 # Enforce neither edge nore node file required columns EDGE = 1 # Enforce edge file required columns NODE = 2 # Enforce node file require columns AUTO = 3 # Automatically decide whether to enforce edge or node file required columns @attr.s(slots=True, frozen=True) class KgtkReaderOptions(): ERROR_LIMIT_DEFAULT: int = 1000 GZIP_QUEUE_SIZE_DEFAULT: int = GunzipProcess.GZIP_QUEUE_SIZE_DEFAULT mode: KgtkReaderMode = 
attr.ib(validator=attr.validators.instance_of(KgtkReaderMode), default=KgtkReaderMode.AUTO) # The column separator is normally tab. column_separator: str = attr.ib(validator=attr.validators.instance_of(str), default=KgtkFormat.COLUMN_SEPARATOR) # supply a missing header record or override an existing header record. force_column_names: typing.Optional[typing.List[str]] = attr.ib(validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str), iterable_validator=attr.validators.instance_of(list))), default=None) skip_header_record: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) # Data record sampling, pre-validation. # # 1) Optionally read and skip a specific number of initial records, or record_limit - tail_count, # whichever is greater. # 2) Optionally pass through every nth record relative to the number of records read. # 3) Optionally limit the total number of records read. initial_skip_count: int = attr.ib(validator=attr.validators.instance_of(int), default=0) every_nth_record: int = attr.ib(validator=attr.validators.instance_of(int), default=1) record_limit: typing.Optional[int] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)), default=None) tail_count: typing.Optional[int] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)), default=None) # How do we handle errors? error_limit: int = attr.ib(validator=attr.validators.instance_of(int), default=ERROR_LIMIT_DEFAULT) # >0 ==> limit error reports # Top-level validation controls: repair_and_validate_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) repair_and_validate_values: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) # Ignore empty lines, comments, and all whitespace lines, etc.? 
empty_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE) comment_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE) whitespace_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE) # Ignore records with empty values in certain fields: blank_required_field_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE) # Ignore records with too many or too few fields? short_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN) long_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN) # How should header errors be processed? header_error_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXIT) unsafe_column_name_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.REPORT) # Validate data cell values? invalid_value_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN) prohibited_list_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN) # Repair records with too many or too few fields? fill_short_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) truncate_long_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) # Other implementation options? 
compression_type: typing.Optional[str] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(str)), default=None) # TODO: use an Enum gzip_in_parallel: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) gzip_queue_size: int = attr.ib(validator=attr.validators.instance_of(int), default=GZIP_QUEUE_SIZE_DEFAULT) @classmethod def add_arguments(cls, parser: ArgumentParser, mode_options: bool = False, default_mode: KgtkReaderMode = KgtkReaderMode.AUTO, validate_by_default: bool = False, expert: bool = False, defaults: bool = True, who: str = "", ): # This helper function makes it easy to suppress options from # The help message. The options are still there, and initialize # what they need to initialize. def h(msg: str)->str: if expert: return msg else: return SUPPRESS # This helper function decices whether or not to include defaults # in argument declarations. If we plan to make arguments with # prefixes and fallbacks, the fallbacks (the ones without prefixes) # should get default values, while the prefixed arguments should # not get defaults. # # Note: In obscure circumstances (EnumNameAction, I'm looking at you), # explicitly setting "default=None" may fail, whereas omitting the # "default=" phrase succeeds. # # TODO: continue researching these issues. def d(default: typing.Any)->typing.Mapping[str, typing.Any]: if defaults: return {"default": default} else: return { } prefix1: str = "--" if len(who) == 0 else "--" + who + "-" prefix2: str = "" if len(who) == 0 else who + "_" prefix3: str = "" if len(who) == 0 else who + ": " prefix4: str = "" if len(who) == 0 else who + " file " fgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "File options"), h("Options affecting " + prefix4 + "processing.")) fgroup.add_argument(prefix1 + "column-separator", dest=prefix2 + "column_separator", help=h(prefix3 + "Column separator (default=<TAB>)."), # TODO: provide the default with escapes, e.g. 
\t type=str, **d(default=KgtkFormat.COLUMN_SEPARATOR)) # TODO: use an Enum or add choices. fgroup.add_argument(prefix1 + "compression-type", dest=prefix2 + "compression_type", help=h(prefix3 + "Specify the compression type (default=%(default)s).")) fgroup.add_argument(prefix1 + "error-limit", dest=prefix2 + "error_limit", help=h(prefix3 + "The maximum number of errors to report before failing (default=%(default)s)"), type=int, **d(default=cls.ERROR_LIMIT_DEFAULT)) fgroup.add_argument(prefix1 + "gzip-in-parallel", dest=prefix2 + "gzip_in_parallel", metavar="optional True|False", help=h(prefix3 + "Execute gzip in parallel (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=False)) fgroup.add_argument(prefix1 + "gzip-queue-size", dest=prefix2 + "gzip_queue_size", help=h(prefix3 + "Queue size for parallel gzip (default=%(default)s)."), type=int, **d(default=cls.GZIP_QUEUE_SIZE_DEFAULT)) if mode_options: fgroup.add_argument(prefix1 + "mode", dest=prefix2 + "mode", help=h(prefix3 + "Determine the KGTK file mode (default=%(default)s)."), type=KgtkReaderMode, action=EnumNameAction, **d(default_mode)) hgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Header parsing"), h("Options affecting " + prefix4 + "header parsing.")) hgroup.add_argument(prefix1 + "force-column-names", dest=prefix2 + "force_column_names", help=h(prefix3 + "Force the column names (default=None)."), nargs='+') hgroup.add_argument(prefix1 + "header-error-action", dest=prefix2 + "header_error_action", help=h(prefix3 + "The action to take when a header error is detected. 
Only ERROR or EXIT are supported (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXIT)) hgroup.add_argument(prefix1 + "skip-header-record", dest=prefix2 + "skip_header_record", metavar="optional True|False", help=h(prefix3 + "Skip the first record when forcing column names (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=False)) hgroup.add_argument(prefix1 + "unsafe-column-name-action", dest=prefix2 + "unsafe_column_name_action", help=h(prefix3 + "The action to take when a column name is unsafe (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.REPORT)) sgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Pre-validation sampling"), h("Options affecting " + prefix4 + "pre-validation data line sampling.")) sgroup.add_argument(prefix1 + "initial-skip-count", dest=prefix2 + "initial_skip_count", help=h(prefix3 + "The number of data records to skip initially (default=do not skip)."), type=int, **d(default=0)) sgroup.add_argument(prefix1 + "every-nth-record", dest=prefix2 + "every_nth_record", help=h(prefix3 + "Pass every nth record (default=pass all records)."), type=int, **d(default=1)) sgroup.add_argument(prefix1 + "record-limit", dest=prefix2 + "record_limit", help=h(prefix3 + "Limit the number of records read (default=no limit)."), type=int, **d(default=None)) sgroup.add_argument(prefix1 + "tail-count", dest=prefix2 + "tail_count", help=h(prefix3 + "Pass this number of records (default=no tail processing)."), type=int, **d(default=None)) lgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Line parsing"), h("Options affecting " + prefix4 + "data line parsing.")) lgroup.add_argument(prefix1 + "repair-and-validate-lines", dest=prefix2 + "repair_and_validate_lines", metavar="optional True|False", help=h(prefix3 + "Repair and validate lines (default=%(default)s)."), type=optional_bool, nargs='?', const=True, 
**d(default=validate_by_default)) lgroup.add_argument(prefix1 + "repair-and-validate-values", dest=prefix2 + "repair_and_validate_values", metavar="optional True|False", help=h(prefix3 + "Repair and validate values (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=validate_by_default)) lgroup.add_argument(prefix1 + "blank-required-field-line-action", dest=prefix2 + "blank_required_field_line_action", help=h(prefix3 + "The action to take when a line with a blank node1, node2, or id field (per mode) is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE)) lgroup.add_argument(prefix1 + "comment-line-action", dest=prefix2 + "comment_line_action", help=h(prefix3 + "The action to take when a comment line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE)) lgroup.add_argument(prefix1 + "empty-line-action", dest=prefix2 + "empty_line_action", help=h(prefix3 + "The action to take when an empty line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE)) lgroup.add_argument(prefix1 + "fill-short-lines", dest=prefix2 + "fill_short_lines", metavar="optional True|False", help=h(prefix3 + "Fill missing trailing columns in short lines with empty values (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=False)) lgroup.add_argument(prefix1 + "invalid-value-action", dest=prefix2 + "invalid_value_action", help=h(prefix3 + "The action to take when a data cell value is invalid (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN)) lgroup.add_argument(prefix1 + "long-line-action", dest=prefix2 + "long_line_action", help=h(prefix3 + "The action to take when a long line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, 
**d(default=ValidationAction.COMPLAIN)) lgroup.add_argument(prefix1 + "prohibited-list-action", dest=prefix2 + "prohibited list_action", help=h(prefix3 + "The action to take when a data cell contains a prohibited list (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN)) lgroup.add_argument(prefix1 + "short-line-action", dest=prefix2 + "short_line_action", help=h(prefix3 + "The action to take when a short line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN)) lgroup.add_argument(prefix1 + "truncate-long-lines", dest=prefix2 + "truncate_long_lines", help=h(prefix3 + "Remove excess trailing columns in long lines (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=False)) lgroup.add_argument(prefix1 + "whitespace-line-action", dest=prefix2 + "whitespace_line_action", help=h(prefix3 + "The action to take when a whitespace line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE)) @classmethod # Build the value parsing option structure. def from_dict(cls, d: dict, who: str = "", mode: typing.Optional[KgtkReaderMode] = None, fallback: bool = False, )->'KgtkReaderOptions': prefix: str = "" # The destination name prefix. if len(who) > 0: prefix = who + "_" # TODO: Figure out how to type check this method. 
def lookup(name: str, default): prefixed_name = prefix + name if prefixed_name in d and d[prefixed_name] is not None: return d[prefixed_name] elif fallback and name in d and d[name] is not None: return d[name] else: return default reader_mode: KgtkReaderMode if mode is not None: reader_mode = mode else: reader_mode = lookup("mode", KgtkReaderMode.AUTO) return cls( blank_required_field_line_action=lookup("blank_required_field_line_action", ValidationAction.EXCLUDE), column_separator=lookup("column_separator", KgtkFormat.COLUMN_SEPARATOR), comment_line_action=lookup("comment_line_action", ValidationAction.EXCLUDE), compression_type=lookup("compression_type", None), empty_line_action=lookup("empty_line_action", ValidationAction.EXCLUDE), error_limit=lookup("error_limit", cls.ERROR_LIMIT_DEFAULT), every_nth_record=lookup("every_nth_record", 1), fill_short_lines=lookup("fill_short_lines", False), force_column_names=lookup("force_column_names", None), gzip_in_parallel=lookup("gzip_in_parallel", False), gzip_queue_size=lookup("gzip_queue_size", KgtkReaderOptions.GZIP_QUEUE_SIZE_DEFAULT), header_error_action=lookup("header_error_action", ValidationAction.EXCLUDE), initial_skip_count=lookup("initial_skip_count", 0), invalid_value_action=lookup("invalid_value_action", ValidationAction.REPORT), long_line_action=lookup("long_line_action", ValidationAction.EXCLUDE), mode=reader_mode, prohibited_list_action=lookup("prohibited_list_action", ValidationAction.REPORT), record_limit=lookup("record_limit", None), repair_and_validate_lines=lookup("repair_and_validate_lines", False), repair_and_validate_values=lookup("repair_and_validate_values", False), short_line_action=lookup("short_line_action", ValidationAction.EXCLUDE), skip_header_record=lookup("skip_header_recordb", False), tail_count=lookup("tail_count", None), truncate_long_lines=lookup("truncate_long_lines", False), unsafe_column_name_action=lookup("unsafe_column_name_action", ValidationAction.REPORT), 
whitespace_line_action=lookup("whitespace_line_action", ValidationAction.EXCLUDE), ) # Build the value parsing option structure. @classmethod def from_args(cls, args: Namespace, who: str = "", mode: typing.Optional[KgtkReaderMode] = None, fallback: bool = False, )->'KgtkReaderOptions': return cls.from_dict(vars(args), who=who, mode=mode, fallback=fallback) def show(self, who: str="", out: typing.TextIO=sys.stderr): prefix: str = "--" if len(who) == 0 else "--" + who + "-" print("%smode=%s" % (prefix, self.mode.name), file=out) print("%scolumn-separator=%s" % (prefix, repr(self.column_separator)), file=out) if self.force_column_names is not None: print("%sforce-column-names=%s" % (prefix, " ".join(self.force_column_names)), file=out) print("%sskip-header-record=%s" % (prefix, str(self.skip_header_record)), file=out) print("%serror-limit=%s" % (prefix, str(self.error_limit)), file=out) print("%srepair-and-validate-lines=%s" % (prefix, str(self.repair_and_validate_lines)), file=out) print("%srepair-and-validate-values=%s" % (prefix, str(self.repair_and_validate_values)), file=out) print("%sempty-line-action=%s" % (prefix, self.empty_line_action.name), file=out) print("%scomment-line-action=%s" % (prefix, self.comment_line_action.name), file=out) print("%swhitespace-line-action=%s" % (prefix, self.whitespace_line_action.name), file=out) print("%sblank-required-field-line-action=%s" % (prefix, self.blank_required_field_line_action.name), file=out) print("%sshort-line-action=%s" % (prefix, self.short_line_action.name), file=out) print("%slong-line-action=%s" % (prefix, self.long_line_action.name), file=out) print("%sheader-error-action=%s" % (prefix, self.header_error_action.name), file=out) print("%sunsafe-column-name-action=%s" % (prefix, self.unsafe_column_name_action.name), file=out) print("%sinvalid-value-action=%s" % (prefix, self.invalid_value_action.name), file=out) print("%sinitial-skip-count=%s" % (prefix, str(self.initial_skip_count)), file=out) 
print("%severy-nth-record=%s" % (prefix, str(self.every_nth_record)), file=out) if self.record_limit is not None: print("%srecord-limit=%s" % (prefix, str(self.record_limit)), file=out) if self.tail_count is not None: print("%stail-count=%s" % (prefix, str(self.tail_count)), file=out) print("%sinitial-skip-count=%s" % (prefix, str(self.initial_skip_count)), file=out) print("%sprohibited-list-action=%s" % (prefix, self.prohibited_list_action.name), file=out) print("%sfill-short-lines=%s" % (prefix, str(self.fill_short_lines)), file=out) print("%struncate-long-lines=%s" % (prefix, str(self.truncate_long_lines)), file=out) if self.compression_type is not None: print("%scompression-type=%s" % (prefix, str(self.compression_type)), file=out) print("%sgzip-in-parallel=%s" % (prefix, str(self.gzip_in_parallel)), file=out) print("%sgzip-queue-size=%s" % (prefix, str(self.gzip_queue_size)), file=out) DEFAULT_KGTK_READER_OPTIONS: KgtkReaderOptions = KgtkReaderOptions() @attr.s(slots=True, frozen=False) class KgtkReader(KgtkBase, ClosableIter[typing.List[str]]): file_path: typing.Optional[Path] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Path))) source: ClosableIter[str] = attr.ib() # Todo: validate # TODO: Fix this validator: # options: KgtkReaderOptions = attr.ib(validator=attr.validators.instance_of(KgtkReaderOptions)) options: KgtkReaderOptions = attr.ib() value_options: KgtkValueOptions = attr.ib(validator=attr.validators.instance_of(KgtkValueOptions)) column_names: typing.List[str] = attr.ib(validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str), iterable_validator=attr.validators.instance_of(list))) # For convenience, the count of columns. This is the same as len(column_names). 
column_count: int = attr.ib(validator=attr.validators.instance_of(int)) column_name_map: typing.Mapping[str, int] = attr.ib(validator=attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(int))) # The actual mode used. # # TODO: fix the validator. # mode: KgtkReaderMode = attr.ib(validator=attr.validators.instance_of(KgtkReaderMode), default=KgtkReaderMode.NONE) mode: KgtkReaderMode = attr.ib(default=KgtkReaderMode.NONE) # The index of the mandatory/aliased columns. -1 means missing: node1_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file label_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file node2_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file id_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # node file data_lines_read: int = attr.ib(validator=attr.validators.instance_of(int), default=0) data_lines_skipped: int = attr.ib(validator=attr.validators.instance_of(int), default=0) data_lines_passed: int = attr.ib(validator=attr.validators.instance_of(int), default=0) data_lines_ignored: int = attr.ib(validator=attr.validators.instance_of(int), default=0) data_errors_reported: int = attr.ib(validator=attr.validators.instance_of(int), default=0) # Is this an edge file or a node file? 
    is_edge_file: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    is_node_file: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    # Feedback and error output:
    error_file: typing.TextIO = attr.ib(default=sys.stderr)
    verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    very_verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    @classmethod
    def _default_options(
            cls,
            options: typing.Optional[KgtkReaderOptions] = None,
            value_options: typing.Optional[KgtkValueOptions] = None,
    )->typing.Tuple[KgtkReaderOptions, KgtkValueOptions]:
        # Supply the default reader and value options when the caller
        # did not provide them:
        if options is None:
            options = DEFAULT_KGTK_READER_OPTIONS
        if value_options is None:
            value_options = DEFAULT_KGTK_VALUE_OPTIONS
        return (options, value_options)

    @classmethod
    def open(cls,
             file_path: typing.Optional[Path],
             who: str = "input",
             error_file: typing.TextIO = sys.stderr,
             mode: typing.Optional[KgtkReaderMode] = None,
             options: typing.Optional[KgtkReaderOptions] = None,
             value_options: typing.Optional[KgtkValueOptions] = None,
             verbose: bool = False,
             very_verbose: bool = False)->"KgtkReader":
        """
        Opens a KGTK file, which may be an edge file or a node file.
        The appropriate reader is returned.

        `mode`, when given, overrides options.mode.  In AUTO mode the file
        kind is deduced from the header: the presence of a node1 column (or
        an alias from cls.NODE1_COLUMN_NAMES) marks an edge file; otherwise
        it is treated as a node file.
        """
        # Supply the default reader and value options:
        (options, value_options) = cls._default_options(options, value_options)

        source: ClosableIter[str] = cls._openfile(file_path, options=options, error_file=error_file, verbose=verbose)

        # Read the kgtk file header and split it into column names. We get the
        # header back, too, for use in debugging and error messages.
        header: str
        column_names: typing.List[str]
        (header, column_names) = cls._build_column_names(source, options, error_file=error_file, verbose=verbose)

        # Check for unsafe column names.
        cls.check_column_names(column_names,
                               header_line=header,
                               who=who,
                               error_action=options.unsafe_column_name_action,
                               error_file=error_file)

        # Build a map from column name to column index.
        column_name_map: typing.Mapping[str, int] = cls.build_column_name_map(column_names,
                                                                              header_line=header,
                                                                              who=who,
                                                                              error_action=options.header_error_action,
                                                                              error_file=error_file)

        # Should we automatically determine if this is an edge file or a node file?
        if mode is None:
            mode = options.mode

        is_edge_file: bool = False
        is_node_file: bool = False
        if mode is KgtkReaderMode.AUTO:
            # If we have a node1 (or alias) column, then this must be an edge file. Otherwise, assume it is a node file.
            node1_idx: int = cls.get_column_idx(cls.NODE1_COLUMN_NAMES,
                                                column_name_map,
                                                header_line=header,
                                                who=who,
                                                error_action=options.header_error_action,
                                                error_file=error_file,
                                                is_optional=True)
            if node1_idx >= 0:
                is_edge_file = True
                is_node_file = False
                if verbose:
                    print("%s column found, this is a KGTK edge file" % column_names[node1_idx], file=error_file, flush=True)
            else:
                is_edge_file = False
                is_node_file = True
                if verbose:
                    print("node1 column not found, assuming this is a KGTK node file", file=error_file, flush=True)
        elif mode is KgtkReaderMode.EDGE:
            is_edge_file = True
        elif mode is KgtkReaderMode.NODE:
            is_node_file = True
        elif mode is KgtkReaderMode.NONE:
            pass

        # Get the indices of the special columns.
        node1_column_idx: int
        label_column_idx: int
        node2_column_idx: int
        id_column_idx: int
        (node1_column_idx,
         label_column_idx,
         node2_column_idx,
         id_column_idx) = cls.get_special_columns(column_name_map,
                                                  header_line=header,
                                                  who=who,
                                                  error_action=options.header_error_action,
                                                  error_file=error_file,
                                                  is_edge_file=is_edge_file,
                                                  is_node_file=is_node_file)
        if verbose:
            print("KgtkReader: Special columns: node1=%d label=%d node2=%d id=%d" % (node1_column_idx, label_column_idx, node2_column_idx, id_column_idx), file=error_file, flush=True)

        if is_edge_file:
            # We'll instantiate an EdgeReader, which is a subclass of KgtkReader.
            # The EdgeReader import is deferred to avoid circular imports.
            from kgtk.io.edgereader import EdgeReader
            if verbose:
                print("KgtkReader: Reading an edge file.", file=error_file, flush=True)
            # Rebind cls so the constructor call below builds the subclass.
            cls = EdgeReader
        elif is_node_file:
            # We'll instantiate an NodeReader, which is a subclass of KgtkReader.
            # The NodeReader import is deferred to avoid circular imports.
            from kgtk.io.nodereader import NodeReader
            if verbose:
                print("KgtkReader: Reading an node file.", file=error_file, flush=True)
            cls = NodeReader

        return cls(file_path=file_path,
                   source=source,
                   column_names=column_names,
                   column_name_map=column_name_map,
                   column_count=len(column_names),
                   mode=mode,
                   node1_column_idx=node1_column_idx,
                   label_column_idx=label_column_idx,
                   node2_column_idx=node2_column_idx,
                   id_column_idx=id_column_idx,
                   error_file=error_file,
                   options=options,
                   value_options=value_options,
                   is_edge_file=is_edge_file,
                   is_node_file=is_node_file,
                   verbose=verbose,
                   very_verbose=very_verbose,
        )

    @classmethod
    def _open_compressed_file(cls,
                              compression_type: str,
                              file_name: str,
                              file_or_path: typing.Union[Path, typing.TextIO],
                              who: str,
                              error_file: typing.TextIO,
                              verbose: bool)->typing.TextIO:
        """Open a gz/bz2/xz/lz4 stream in text mode.

        `compression_type` is either a file suffix (".gz") or a bare type
        name ("gz").  Raises ValueError for any other value.
        """
        # TODO: find a better way to coerce typing.IO[Any] to typing.TextIO
        if compression_type in [".gz", "gz"]:
            if verbose:
                print("%s: reading gzip %s" % (who, file_name), file=error_file, flush=True)
            return gzip.open(file_or_path, mode="rt") # type: ignore
        elif compression_type in [".bz2", "bz2"]:
            if verbose:
                print("%s: reading bz2 %s" % (who, file_name), file=error_file, flush=True)
            return bz2.open(file_or_path, mode="rt") # type: ignore
        elif compression_type in [".xz", "xz"]:
            if verbose:
                print("%s: reading lzma %s" % (who, file_name), file=error_file, flush=True)
            return lzma.open(file_or_path, mode="rt") # type: ignore
        elif compression_type in [".lz4", "lz4"]:
            if verbose:
                print("%s: reading lz4 %s" % (who, file_name), file=error_file, flush=True)
            return lz4.frame.open(file_or_path, mode="rt") # type: ignore
        else:
            # TODO: throw a better exception.
            raise ValueError("%s: Unexpected compression_type '%s'" % (who, compression_type))

    @classmethod
    def _openfile(cls,
                  file_path: typing.Optional[Path],
                  options: KgtkReaderOptions,
                  error_file: typing.TextIO,
                  verbose: bool)->ClosableIter[str]:
        """Open the input as a closable line iterator.

        A None path or "-" reads standard input.  Compression is chosen by
        options.compression_type when set, otherwise by the file suffix.
        When options.gzip_in_parallel is set, decompression runs in a
        separate GunzipProcess feeding a queue.
        """
        who: str = cls.__name__
        if file_path is None or str(file_path) == "-":
            if options.compression_type is not None and len(options.compression_type) > 0:
                return ClosableIterTextIOWrapper(cls._open_compressed_file(options.compression_type, "-", sys.stdin, who, error_file, verbose))
            else:
                if verbose:
                    print("%s: reading stdin" % who, file=error_file, flush=True)
                return ClosableIterTextIOWrapper(sys.stdin)

        if verbose:
            print("%s: File_path.suffix: %s" % (who, file_path.suffix), file=error_file, flush=True)

        gzip_file: typing.TextIO
        if options.compression_type is not None and len(options.compression_type) > 0:
            # An explicit compression type overrides the file suffix.
            gzip_file = cls._open_compressed_file(options.compression_type, str(file_path), file_path, who, error_file, verbose)
        elif file_path.suffix in [".bz2", ".gz", ".lz4", ".xz"]:
            gzip_file = cls._open_compressed_file(file_path.suffix, str(file_path), file_path, who, error_file, verbose)
        else:
            # Plain text file: no decompression wrapper needed.
            if verbose:
                print("%s: reading file %s" % (who, str(file_path)), file=error_file, flush=True)
            return ClosableIterTextIOWrapper(open(file_path, "r"))

        if options.gzip_in_parallel:
            gzip_thread: GunzipProcess = GunzipProcess(gzip_file, Queue(options.gzip_queue_size))
            gzip_thread.start()
            return gzip_thread
        else:
            return ClosableIterTextIOWrapper(gzip_file)

    @classmethod
    def _build_column_names(cls,
                            source: ClosableIter[str],
                            options: KgtkReaderOptions,
                            error_file: typing.TextIO,
                            verbose: bool = False,
    )->typing.Tuple[str, typing.List[str]]:
        """
        Read the kgtk file header and split it into column names.

        Returns (header line, column name list).  When
        options.force_column_names is set, the forced names are used instead
        and a synthesized header line is returned.
        """
        column_names: typing.List[str]
        if options.force_column_names is None:
            # Read the column names from the first line, stripping end-of-line characters.
            #
            # TODO: if the read fails, throw a more useful exception with the line number.
            try:
                header: str = next(source).rstrip("\r\n")
            except StopIteration:
                raise ValueError("No header line in file")
            if verbose:
                print("header: %s" % header, file=error_file, flush=True)

            # Split the first line into column names.
            return header, header.split(options.column_separator)
        else:
            # Skip the first record to override the column names in the file.
            # Do not skip the first record if the file does not have a header record.
            if options.skip_header_record:
                try:
                    next(source)
                except StopIteration:
                    raise ValueError("No header line to skip")

            # Use the forced column names.
            return options.column_separator.join(options.force_column_names), options.force_column_names

    def close(self):
        """Close the underlying line source."""
        self.source.close()

    def exclude_line(self, action: ValidationAction, msg: str, line: str)->bool:
        """
        Take a validation action.  Returns True if the line should be excluded.

        PASS/EXCLUDE are silent; REPORT/COMPLAIN print the message (and
        exclude only for COMPLAIN); ERROR raises ValueError; EXIT prints and
        terminates the process.  Reported errors count toward
        options.error_limit, and exceeding the limit raises ValueError.
        """
        result: bool
        if action == ValidationAction.PASS:
            return False # Silently pass the line through
        elif action == ValidationAction.REPORT:
            result= False # Report the issue then pass the line.
        elif action == ValidationAction.EXCLUDE:
            return True # Silently exclude the line
        elif action == ValidationAction.COMPLAIN:
            result = True # Report the issue then exclude the line.
        elif action == ValidationAction.ERROR:
            # Immediately raise an exception.
            raise ValueError("In input data line %d, %s: %s" % (self.data_lines_read, msg, line))
        elif action == ValidationAction.EXIT:
            print("Data line %d:\n%s\n%s" % (self.data_lines_read, line, msg), file=self.error_file, flush=True)
            sys.exit(1)

        # REPORT and COMPLAIN fall through to here:
        # print("In input data line %d, %s: %s" % (self.data_lines_read, msg, line), file=self.error_file, flush=True)
        print("Data line %d:\n%s\n%s" % (self.data_lines_read, line, msg), file=self.error_file, flush=True)
        self.data_errors_reported += 1
        if self.options.error_limit > 0 and self.data_errors_reported >= self.options.error_limit:
            raise ValueError("Too many data errors, exiting.")
        return result

    # Get the next edge values as a list of strings.
def nextrow(self)-> typing.List[str]: row: typing.List[str] repair_and_validate_lines: bool = self.options.repair_and_validate_lines repair_and_validate_values: bool = self.options.repair_and_validate_values # Compute the initial skip count skip_count: int = self.options.initial_skip_count if self.options.record_limit is not None and self.options.tail_count is not None: # Compute the tail count. tail_skip_count: int = self.options.record_limit - self.options.tail_count if tail_skip_count > skip_count: skip_count = tail_skip_count # Take the larger skip count. # This loop accomodates lines that are ignored. while (True): # Has a record limit been specified and have we reached it? if self.options.record_limit is not None: if self.data_lines_read >= self.options.record_limit: # Close the source and stop the iteration. self.source.close() # Do we need to guard against repeating this call? raise StopIteration # Read a line from the source line: str try: line = next(self.source) # Will throw StopIteration except StopIteration as e: # Close the input file! # # TODO: implement a close() routine and/or whatever it takes to support "with". self.source.close() # Do we need to guard against repeating this call? raise e # Count the data line read. self.data_lines_read += 1 # Data sampling: if self.data_lines_read <= skip_count: self.data_lines_skipped += 1 continue if self.options.every_nth_record > 1: if self.data_lines_read % self.options.every_nth_record != 0: self.data_lines_skipped += 1 continue # Strip the end-of-line characters: line = line.rstrip("\r\n") if repair_and_validate_lines: # TODO: Use a sepearate option to control this. if self.very_verbose: print("'%s'" % line, file=self.error_file, flush=True) # Ignore empty lines. 
if self.options.empty_line_action != ValidationAction.PASS and len(line) == 0: if self.exclude_line(self.options.empty_line_action, "saw an empty line", line): continue # Ignore comment lines: if self.options.comment_line_action != ValidationAction.PASS and line[0] == self.COMMENT_INDICATOR: if self.exclude_line(self.options.comment_line_action, "saw a comment line", line): continue # Ignore whitespace lines if self.options.whitespace_line_action != ValidationAction.PASS and line.isspace(): if self.exclude_line(self.options.whitespace_line_action, "saw a whitespace line", line): continue row = line.split(self.options.column_separator) if repair_and_validate_lines: # Optionally fill missing trailing columns with empty row: if self.options.fill_short_lines and len(row) < self.column_count: while len(row) < self.column_count: row.append("") # Optionally remove extra trailing columns: if self.options.truncate_long_lines and len(row) > self.column_count: row = row[:self.column_count] # Optionally validate that the line contained the right number of columns: # # When we report line numbers in error messages, line 1 is the first line after the header line. if self.options.short_line_action != ValidationAction.PASS and len(row) < self.column_count: if self.exclude_line(self.options.short_line_action, "Required %d columns, saw %d: '%s'" % (self.column_count, len(row), line), line): continue if self.options.long_line_action != ValidationAction.PASS and len(row) > self.column_count: if self.exclude_line(self.options.long_line_action, "Required %d columns, saw %d (%d extra): '%s'" % (self.column_count, len(row), len(row) - self.column_count, line), line): continue if self._ignore_if_blank_fields(row, line): continue if repair_and_validate_values: if self.options.invalid_value_action != ValidationAction.PASS: # TODO: find a way to optionally cache the KgtkValue objects # so we don't have to create them a second time in the conversion # and iterator methods below. 
if self._ignore_invalid_values(row, line): continue if self.options.prohibited_list_action != ValidationAction.PASS: if self._ignore_prohibited_lists(row, line): continue self.data_lines_passed += 1 # TODO: User a seperate option to control this. # if self.very_verbose: # self.error_file.write(".") # self.error_file.flush() return row # This is both and iterable and an iterator object. def __iter__(self)->typing.Iterator[typing.List[str]]: return self # Get the next row values as a list of strings. # TODO: Convert integers, coordinates, etc. to Python types def __next__(self)-> typing.List[str]: return self.nextrow() def concise_rows(self)->typing.Iterator[typing.List[typing.Optional[str]]]: """ Using a generator function, create an iterator that returns rows of fields as strings. Empty fields will be returned as None. """ while True: try: row: typing.List[str] = self.nextrow() except StopIteration: return # Copy the row, converting empty fields into None: results: typing.List[typing.Optional[str]] = [ ] field: str for field in row: if len(field) == 0: results.append(None) else: results.append(field) yield results def to_kgtk_values(self, row: typing.List[str], validate: bool = False, parse_fields: bool = False)->typing.List[KgtkValue]: """ Convert an input row into a list of KgtkValue instances. When validate is True, validate each KgtkValue object. """ results: typing.List[KgtkValue] = [ ] field: str for field in row: kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields) if validate: kv.validate() results.append(kv) return results def kgtk_values(self, validate: bool = False, parse_fields: bool = False )->typing.Iterator[typing.List[KgtkValue]]: """ Using a generator function, create an iterator that returns rows of fields as KgtkValue objects. When validate is True, validate each KgtkValue object. 
""" while True: try: yield self.to_kgtk_values(self.nextrow(), validate=validate, parse_fields=parse_fields) except StopIteration: return def to_concise_kgtk_values(self, row: typing.List[str], validate: bool = False, parse_fields: bool = False )->typing.List[typing.Optional[KgtkValue]]: """ Convert an input row into a list of KgtkValue instances. Empty fields will be returned as None. When validate is True, validate each KgtkValue object. """ results: typing.List[typing.Optional[KgtkValue]] = [ ] field: str for field in row: if len(field) == 0: results.append(None) else: kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields) if validate: kv.validate() results.append(kv) return results def concise_kgtk_values(self, validate: bool = False, parse_fields: bool = False )->typing.Iterator[typing.List[typing.Optional[KgtkValue]]]: """ Using a generator function, create an iterator that returns rows of fields as KgtkValue objects, with empty fields returned as None. When validate is True, validate each KgtkValue object. """ while True: try: yield self.to_concise_kgtk_values(self.nextrow(), validate=validate) except StopIteration: return def to_dict(self, row: typing.List[str], concise: bool=False )->typing.Mapping[str, str]: """ Convert an input row into a dict of named fields. If concise is True, then empty fields will be skipped. """ results: typing.MutableMapping[str, str] = { } field: str idx: int = 0 # We'll use two seperate loops in anticipation of a modest # efficiency gain. if concise: for field in row: if len(field) > 0: results[self.column_names[idx]] = field idx += 1 else: for field in row: results[self.column_names[idx]] = field idx += 1 return results def dicts(self, concise: bool=False )->typing.Iterator[typing.Mapping[str, str]]: """ Using a generator function, create an iterator that returns each row as a dict of named fields. If concise is True, then empty fields will be skipped. 
""" while True: try: yield self.to_dict(self.nextrow(), concise=concise) except StopIteration: return def to_kgtk_value_dict(self, row: typing.List[str], validate: bool=False, parse_fields: bool=False, concise: bool=False )->typing.Mapping[str, KgtkValue]: """ Convert an input row into a dict of named fields. If concise is True, then empty fields will be skipped. When validate is True, validate each KgtkValue object. """ results: typing.MutableMapping[str, KgtkValue] = { } idx: int = 0 field: str for field in row: if concise and len(field) == 0: pass # Skip the empty field. else: kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields) if validate: kv.validate() results[self.column_names[idx]] = kv idx += 1 return results def kgtk_value_dicts(self, validate: bool=False, parse_fields: bool=False, concise: bool=False )->typing.Iterator[typing.Mapping[str, KgtkValue]]: """ Using a generator function, create an iterator that returns each row as a dict of named KgtkValue objects. If concise is True, then empty fields will be skipped. When validate is True, validate each KgtkValue object. """ while True: try: yield self.to_kgtk_value_dict(self.nextrow(), validate=validate, parse_fields=parse_fields, concise=concise) except StopIteration: return def _ignore_invalid_values(self, row: typing.List[str], line: str)->bool: """Give a row of values, validate each value. If we find one or more validation problems, we might want to emit error messages and we might want to ignore the entire row. Returns True to indicate that the row should be ignored (skipped). """ problems: typing.List[str] = [ ] # Build a list of problems. idx: int item: str for idx, item in enumerate(row): if len(item) > 0: # Optimize the common case of empty columns. 
kv: KgtkValue = KgtkValue(item, options=self.value_options) if not kv.is_valid(): problems.append("col %d (%s) value '%s'is an %s" % (idx, self.column_names[idx], item, kv.describe())) if kv.repaired: # If this value was repaired, update the item in the row. # # Warning: We expect this change to be seen by the caller. row[idx] = kv.value if len(problems) == 0: return False return self.exclude_line(self.options.invalid_value_action, "\n".join(problems), line) def _ignore_prohibited_list(self, idx: int, row: typing.List[str], line: str, problems: typing.List[str], ): if idx < 0: return item: str = row[idx] if KgtkFormat.LIST_SEPARATOR not in item: return if len(KgtkValue.split_list(item)) == 1: return problems.append("col %d (%s) value '%s'is a prohibited list" % (idx, self.column_names[idx], item)) def _ignore_prohibited_lists(self, row: typing.List[str], line: str)->bool: """ KGTK File Format v2 prohibits "|" lists in the node1, label, and node2 columns. """ problems: typing.List[str] = [ ] # Build a list of problems. self._ignore_prohibited_list(self.node1_column_idx, row, line, problems) self._ignore_prohibited_list(self.label_column_idx, row, line, problems) self._ignore_prohibited_list(self.node2_column_idx, row, line, problems) if len(problems) == 0: return False return self.exclude_line(self.options.invalid_value_action, "\n".join(problems), line) # May be overridden def _ignore_if_blank_fields(self, values: typing.List[str], line: str)->bool: return False # May be overridden def _skip_reserved_fields(self, column_name)->bool: return False def additional_column_names(self)->typing.List[str]: if self.is_edge_file: return KgtkBase.additional_edge_columns(self.column_names) elif self.is_node_file: return KgtkBase.additional_node_columns(self.column_names) else: # TODO: throw a better exception. 
raise ValueError("KgtkReader: Unknown Kgtk file type.") def merge_columns(self, additional_columns: typing.List[str])->typing.List[str]: """ Return a list that merges the current column names with an additional set of column names. """ merged_columns: typing.List[str] = self.column_names.copy() column_name: str for column_name in additional_columns: if column_name in self.column_name_map: continue merged_columns.append(column_name) return merged_columns def get_node1_column_index(self, column_name: typing.Optional[str] = None)->int: """ Get the node1 column index, unless an overriding column name is provided. Returns -1 if no column found. """ if column_name is None or len(column_name) == 0: return self.node1_column_idx else: return self.column_name_map.get(column_name, -1) def get_node1_canonical_name(self, column_name: typing.Optional[str]=None)->str: """ Get the canonical name for the node1 column, unless an overriding name is provided. """ if column_name is not None and len(column_name) > 0: return column_name else: return KgtkFormat.NODE1 def get_node1_column_actual_name(self, column_name: typing.Optional[str]=None)->str: """ Get the actual name for the node1 column or its overriding column. Return an empty string if the column was not found. """ idx: int = self.get_node1_column_index(column_name) if idx >= 0: return self.column_names[idx] else: return "" def get_label_column_index(self, column_name: typing.Optional[str] = None)->int: """ Get the label column index, unless an overriding column name is provided. Returns -1 if no column found. """ if column_name is None or len(column_name) == 0: return self.label_column_idx else: return self.column_name_map.get(column_name, -1) def get_label_canonical_name(self, column_name: typing.Optional[str]=None)->str: """ Get the canonical name for the label column, unless an overriding name is provided. 
""" if column_name is not None and len(column_name) > 0: return column_name else: return KgtkFormat.LABEL def get_label_column_actual_name(self, column_name: typing.Optional[str]=None)->str: """ Get the actual name for the label column or its overriding column. Return an empty string if the column was not found. """ idx: int = self.get_label_column_index(column_name) if idx >= 0: return self.column_names[idx] else: return "" def get_node2_column_index(self, column_name: typing.Optional[str] = None)->int: """ Get the node2 column index, unless an overriding column name is provided. Returns -1 if no column found. """ if column_name is None or len(column_name) == 0: return self.node2_column_idx else: return self.column_name_map.get(column_name, -1) def get_node2_canonical_name(self, column_name: typing.Optional[str]=None)->str: """ Get the canonical name for the node2 column, unless an overriding name is provided. """ if column_name is not None and len(column_name) > 0: return column_name else: return KgtkFormat.NODE2 def get_node2_column_actual_name(self, column_name: typing.Optional[str]=None)->str: """ Get the actual name for the node2 column or its overriding column. Return an empty string if the column was not found. """ idx: int = self.get_node2_column_index(column_name) if idx >= 0: return self.column_names[idx] else: return "" def get_id_column_index(self, column_name: typing.Optional[str] = None)->int: """ Get the id column index, unless an overriding column name is provided. Returns -1 if no column found. """ if column_name is None or len(column_name) == 0: return self.id_column_idx else: return self.column_name_map.get(column_name, -1) def get_id_canonical_name(self, column_name: typing.Optional[str]=None)->str: """ Get the canonical name for the id column, unless an overriding name is provided. 
""" if column_name is not None and len(column_name) > 0: return column_name else: return KgtkFormat.ID def get_id_column_actual_name(self, column_name: typing.Optional[str]=None)->str: """ Get the actual name for the id column or its overriding column. Return an empty string if the column was not found. """ idx: int = self.get_id_column_index(column_name) if idx >= 0: return self.column_names[idx] else: return "" @classmethod def add_debug_arguments(cls, parser: ArgumentParser, expert: bool = False): # This helper function makes it easy to suppress options from # The help message. The options are still there, and initialize # what they need to initialize. def h(msg: str)->str: if expert: return msg else: return SUPPRESS egroup: _ArgumentGroup = parser.add_argument_group(h("Error and feedback messages"), h("Send error messages and feedback to stderr or stdout, " + "control the amount of feedback and debugging messages.")) # Avoid the argparse bug that prevents these two arguments from having # their help messages suppressed directly. # # TODO: Is there a better fix? # # TODO: replace --errors-to-stdout and --errors-to-stderr with # --errors-to=stdout and --errors-to=stderr, using either an enum # or choices. That will avoid the argparse bug, too. 
if expert: errors_to = egroup.add_mutually_exclusive_group() errors_to.add_argument( "--errors-to-stdout", dest="errors_to_stdout", help="Send errors to stdout instead of stderr", action="store_true") errors_to.add_argument( "--errors-to-stderr", dest="errors_to_stderr", help="Send errors to stderr instead of stdout", action="store_true") else: egroup.add_argument( "--errors-to-stderr", dest="errors_to_stderr", help=h("Send errors to stderr instead of stdout"), action="store_true") egroup.add_argument( "--errors-to-stdout", dest="errors_to_stdout", help=h("Send errors to stdout instead of stderr"), action="store_true") egroup.add_argument( "--show-options", dest="show_options", help=h("Print the options selected (default=%(default)s)."), action='store_true') egroup.add_argument("-v", "--verbose", dest="verbose", help="Print additional progress messages (default=%(default)s).", action='store_true') egroup.add_argument( "--very-verbose", dest="very_verbose", help=h("Print additional progress messages (default=%(default)s)."), action='store_true') def main(): """ Test the KGTK file reader. """ # The EdgeReader import is deferred to avoid circular imports. from kgtk.io.edgereader import EdgeReader # The NodeReader import is deferred to avoid circular imports. 
from kgtk.io.nodereader import NodeReader parser = ArgumentParser() parser.add_argument(dest="kgtk_file", help="The KGTK file to read", type=Path, nargs="?") KgtkReader.add_debug_arguments(parser, expert=True) parser.add_argument( "--test", dest="test_method", help="The test to perform (default=%(default)s).", choices=["rows", "concise-rows", "kgtk-values", "concise-kgtk-values", "dicts", "concise-dicts", "kgtk-value-dicts", "concise-kgtk-value-dicts"], default="rows") parser.add_argument( "--test-validate", dest="test_validate", help="Validate KgtkValue objects in test (default=%(default)s).", type=optional_bool, nargs='?', const=True, default=False) KgtkReaderOptions.add_arguments(parser, mode_options=True, validate_by_default=True, expert=True) KgtkValueOptions.add_arguments(parser, expert=True) args: Namespace = parser.parse_args() error_file: typing.TextIO = sys.stdout if args.errors_to_stdout else sys.stderr # Build the option structures. reader_options: KgtkReaderOptions = KgtkReaderOptions.from_args(args) value_options: KgtkValueOptions = KgtkValueOptions.from_args(args) if args.show_options: print("--test=%s" % str(args.test), file=error_file) print("--test-validate=%s" % str(args.test_validate), file=error_file) reader_options.show(out=error_file) value_options.show(out=error_file) print("=======", file=error_file, flush=True) kr: KgtkReader = KgtkReader.open(args.kgtk_file, error_file = error_file, options=reader_options, value_options=value_options, verbose=args.verbose, very_verbose=args.very_verbose) line_count: int = 0 row: typing.List[str] kgtk_values: typing.List[KgtkValue] concise_kgtk_values: typing.List[typing.Optional[KgtkValue]] dict_row: typing.Mapping[str, str] kgtk_value_dict: typing.Mapping[str, str] if args.test_method == "rows": if args.verbose: print("Testing iterating over rows.", file=error_file, flush=True) for row in kr: line_count += 1 elif args.test_method == "concise-rows": if args.verbose: print("Testing iterating over concise 
rows.", file=error_file, flush=True) for row in kr.concise_rows(): line_count += 1 elif args.test_method == "kgtk-values": if args.verbose: print("Testing iterating over KgtkValue rows.", file=error_file, flush=True) for kgtk_values in kr.kgtk_values(validate=args.test_validate): line_count += 1 elif args.test_method == "concise-kgtk-values": if args.verbose: print("Testing iterating over concise KgtkValue rows.", file=error_file, flush=True) for kgtk_values in kr.concise_kgtk_values(validate=args.test_validate): line_count += 1 elif args.test_method == "dicts": if args.verbose: print("Testing iterating over dicts.", file=error_file, flush=True) for dict_row in kr.dicts(): line_count += 1 elif args.test_method == "concise-dicts": if args.verbose: print("Testing iterating over concise dicts.", file=error_file, flush=True) for dict_row in kr.dicts(concise=True): line_count += 1 elif args.test_method == "kgtk-value-dicts": if args.verbose: print("Testing iterating over KgtkValue dicts.", file=error_file, flush=True) for kgtk_value_dict in kr.kgtk_value_dicts(validate=args.test_validate): line_count += 1 elif args.test_method == "concise-kgtk-value-dicts": if args.verbose: print("Testing iterating over concise KgtkValue dicts.", file=error_file, flush=True) for kgtk_value_dict in kr.kgtk_value_dicts(concise=True, validate=args.test_validate): line_count += 1 print("Read %d lines" % line_count, file=error_file, flush=True) if __name__ == "__main__": main()
49.238701
188
0.586486
from argparse import ArgumentParser, _ArgumentGroup, Namespace, SUPPRESS import attr import bz2 from enum import Enum import gzip import lz4 import lzma from multiprocessing import Process, Queue from pathlib import Path import sys import typing from kgtk.kgtkformat import KgtkFormat from kgtk.io.kgtkbase import KgtkBase from kgtk.utils.argparsehelpers import optional_bool from kgtk.utils.closableiter import ClosableIter, ClosableIterTextIOWrapper from kgtk.utils.enumnameaction import EnumNameAction from kgtk.utils.gzipprocess import GunzipProcess from kgtk.utils.validationaction import ValidationAction from kgtk.value.kgtkvalue import KgtkValue from kgtk.value.kgtkvalueoptions import KgtkValueOptions, DEFAULT_KGTK_VALUE_OPTIONS class KgtkReaderMode(Enum): NONE = 0 EDGE = 1 NODE = 2 AUTO = 3 @attr.s(slots=True, frozen=True) class KgtkReaderOptions(): ERROR_LIMIT_DEFAULT: int = 1000 GZIP_QUEUE_SIZE_DEFAULT: int = GunzipProcess.GZIP_QUEUE_SIZE_DEFAULT mode: KgtkReaderMode = attr.ib(validator=attr.validators.instance_of(KgtkReaderMode), default=KgtkReaderMode.AUTO) column_separator: str = attr.ib(validator=attr.validators.instance_of(str), default=KgtkFormat.COLUMN_SEPARATOR) force_column_names: typing.Optional[typing.List[str]] = attr.ib(validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str), iterable_validator=attr.validators.instance_of(list))), default=None) skip_header_record: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) initial_skip_count: int = attr.ib(validator=attr.validators.instance_of(int), default=0) every_nth_record: int = attr.ib(validator=attr.validators.instance_of(int), default=1) record_limit: typing.Optional[int] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)), default=None) tail_count: typing.Optional[int] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)), default=None) error_limit: int = 
attr.ib(validator=attr.validators.instance_of(int), default=ERROR_LIMIT_DEFAULT) repair_and_validate_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) repair_and_validate_values: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) empty_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE) comment_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE) whitespace_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE) blank_required_field_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE) short_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN) long_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN) header_error_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXIT) unsafe_column_name_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.REPORT) invalid_value_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN) prohibited_list_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN) fill_short_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) truncate_long_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) compression_type: typing.Optional[str] = 
attr.ib(validator=attr.validators.optional(attr.validators.instance_of(str)), default=None) gzip_in_parallel: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) gzip_queue_size: int = attr.ib(validator=attr.validators.instance_of(int), default=GZIP_QUEUE_SIZE_DEFAULT) @classmethod def add_arguments(cls, parser: ArgumentParser, mode_options: bool = False, default_mode: KgtkReaderMode = KgtkReaderMode.AUTO, validate_by_default: bool = False, expert: bool = False, defaults: bool = True, who: str = "", ): def h(msg: str)->str: if expert: return msg else: return SUPPRESS # explicitly setting "default=None" may fail, whereas omitting the # "default=" phrase succeeds. # # TODO: continue researching these issues. def d(default: typing.Any)->typing.Mapping[str, typing.Any]: if defaults: return {"default": default} else: return { } prefix1: str = "--" if len(who) == 0 else "--" + who + "-" prefix2: str = "" if len(who) == 0 else who + "_" prefix3: str = "" if len(who) == 0 else who + ": " prefix4: str = "" if len(who) == 0 else who + " file " fgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "File options"), h("Options affecting " + prefix4 + "processing.")) fgroup.add_argument(prefix1 + "column-separator", dest=prefix2 + "column_separator", help=h(prefix3 + "Column separator (default=<TAB>)."), # TODO: provide the default with escapes, e.g. \t type=str, **d(default=KgtkFormat.COLUMN_SEPARATOR)) # TODO: use an Enum or add choices. 
fgroup.add_argument(prefix1 + "compression-type", dest=prefix2 + "compression_type", help=h(prefix3 + "Specify the compression type (default=%(default)s).")) fgroup.add_argument(prefix1 + "error-limit", dest=prefix2 + "error_limit", help=h(prefix3 + "The maximum number of errors to report before failing (default=%(default)s)"), type=int, **d(default=cls.ERROR_LIMIT_DEFAULT)) fgroup.add_argument(prefix1 + "gzip-in-parallel", dest=prefix2 + "gzip_in_parallel", metavar="optional True|False", help=h(prefix3 + "Execute gzip in parallel (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=False)) fgroup.add_argument(prefix1 + "gzip-queue-size", dest=prefix2 + "gzip_queue_size", help=h(prefix3 + "Queue size for parallel gzip (default=%(default)s)."), type=int, **d(default=cls.GZIP_QUEUE_SIZE_DEFAULT)) if mode_options: fgroup.add_argument(prefix1 + "mode", dest=prefix2 + "mode", help=h(prefix3 + "Determine the KGTK file mode (default=%(default)s)."), type=KgtkReaderMode, action=EnumNameAction, **d(default_mode)) hgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Header parsing"), h("Options affecting " + prefix4 + "header parsing.")) hgroup.add_argument(prefix1 + "force-column-names", dest=prefix2 + "force_column_names", help=h(prefix3 + "Force the column names (default=None)."), nargs='+') hgroup.add_argument(prefix1 + "header-error-action", dest=prefix2 + "header_error_action", help=h(prefix3 + "The action to take when a header error is detected. 
Only ERROR or EXIT are supported (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXIT)) hgroup.add_argument(prefix1 + "skip-header-record", dest=prefix2 + "skip_header_record", metavar="optional True|False", help=h(prefix3 + "Skip the first record when forcing column names (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=False)) hgroup.add_argument(prefix1 + "unsafe-column-name-action", dest=prefix2 + "unsafe_column_name_action", help=h(prefix3 + "The action to take when a column name is unsafe (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.REPORT)) sgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Pre-validation sampling"), h("Options affecting " + prefix4 + "pre-validation data line sampling.")) sgroup.add_argument(prefix1 + "initial-skip-count", dest=prefix2 + "initial_skip_count", help=h(prefix3 + "The number of data records to skip initially (default=do not skip)."), type=int, **d(default=0)) sgroup.add_argument(prefix1 + "every-nth-record", dest=prefix2 + "every_nth_record", help=h(prefix3 + "Pass every nth record (default=pass all records)."), type=int, **d(default=1)) sgroup.add_argument(prefix1 + "record-limit", dest=prefix2 + "record_limit", help=h(prefix3 + "Limit the number of records read (default=no limit)."), type=int, **d(default=None)) sgroup.add_argument(prefix1 + "tail-count", dest=prefix2 + "tail_count", help=h(prefix3 + "Pass this number of records (default=no tail processing)."), type=int, **d(default=None)) lgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Line parsing"), h("Options affecting " + prefix4 + "data line parsing.")) lgroup.add_argument(prefix1 + "repair-and-validate-lines", dest=prefix2 + "repair_and_validate_lines", metavar="optional True|False", help=h(prefix3 + "Repair and validate lines (default=%(default)s)."), type=optional_bool, nargs='?', const=True, 
**d(default=validate_by_default)) lgroup.add_argument(prefix1 + "repair-and-validate-values", dest=prefix2 + "repair_and_validate_values", metavar="optional True|False", help=h(prefix3 + "Repair and validate values (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=validate_by_default)) lgroup.add_argument(prefix1 + "blank-required-field-line-action", dest=prefix2 + "blank_required_field_line_action", help=h(prefix3 + "The action to take when a line with a blank node1, node2, or id field (per mode) is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE)) lgroup.add_argument(prefix1 + "comment-line-action", dest=prefix2 + "comment_line_action", help=h(prefix3 + "The action to take when a comment line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE)) lgroup.add_argument(prefix1 + "empty-line-action", dest=prefix2 + "empty_line_action", help=h(prefix3 + "The action to take when an empty line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE)) lgroup.add_argument(prefix1 + "fill-short-lines", dest=prefix2 + "fill_short_lines", metavar="optional True|False", help=h(prefix3 + "Fill missing trailing columns in short lines with empty values (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=False)) lgroup.add_argument(prefix1 + "invalid-value-action", dest=prefix2 + "invalid_value_action", help=h(prefix3 + "The action to take when a data cell value is invalid (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN)) lgroup.add_argument(prefix1 + "long-line-action", dest=prefix2 + "long_line_action", help=h(prefix3 + "The action to take when a long line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, 
**d(default=ValidationAction.COMPLAIN)) lgroup.add_argument(prefix1 + "prohibited-list-action", dest=prefix2 + "prohibited list_action", help=h(prefix3 + "The action to take when a data cell contains a prohibited list (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN)) lgroup.add_argument(prefix1 + "short-line-action", dest=prefix2 + "short_line_action", help=h(prefix3 + "The action to take when a short line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN)) lgroup.add_argument(prefix1 + "truncate-long-lines", dest=prefix2 + "truncate_long_lines", help=h(prefix3 + "Remove excess trailing columns in long lines (default=%(default)s)."), type=optional_bool, nargs='?', const=True, **d(default=False)) lgroup.add_argument(prefix1 + "whitespace-line-action", dest=prefix2 + "whitespace_line_action", help=h(prefix3 + "The action to take when a whitespace line is detected (default=%(default)s)."), type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE)) @classmethod # Build the value parsing option structure. def from_dict(cls, d: dict, who: str = "", mode: typing.Optional[KgtkReaderMode] = None, fallback: bool = False, )->'KgtkReaderOptions': prefix: str = "" # The destination name prefix. if len(who) > 0: prefix = who + "_" # TODO: Figure out how to type check this method. 
def lookup(name: str, default): prefixed_name = prefix + name if prefixed_name in d and d[prefixed_name] is not None: return d[prefixed_name] elif fallback and name in d and d[name] is not None: return d[name] else: return default reader_mode: KgtkReaderMode if mode is not None: reader_mode = mode else: reader_mode = lookup("mode", KgtkReaderMode.AUTO) return cls( blank_required_field_line_action=lookup("blank_required_field_line_action", ValidationAction.EXCLUDE), column_separator=lookup("column_separator", KgtkFormat.COLUMN_SEPARATOR), comment_line_action=lookup("comment_line_action", ValidationAction.EXCLUDE), compression_type=lookup("compression_type", None), empty_line_action=lookup("empty_line_action", ValidationAction.EXCLUDE), error_limit=lookup("error_limit", cls.ERROR_LIMIT_DEFAULT), every_nth_record=lookup("every_nth_record", 1), fill_short_lines=lookup("fill_short_lines", False), force_column_names=lookup("force_column_names", None), gzip_in_parallel=lookup("gzip_in_parallel", False), gzip_queue_size=lookup("gzip_queue_size", KgtkReaderOptions.GZIP_QUEUE_SIZE_DEFAULT), header_error_action=lookup("header_error_action", ValidationAction.EXCLUDE), initial_skip_count=lookup("initial_skip_count", 0), invalid_value_action=lookup("invalid_value_action", ValidationAction.REPORT), long_line_action=lookup("long_line_action", ValidationAction.EXCLUDE), mode=reader_mode, prohibited_list_action=lookup("prohibited_list_action", ValidationAction.REPORT), record_limit=lookup("record_limit", None), repair_and_validate_lines=lookup("repair_and_validate_lines", False), repair_and_validate_values=lookup("repair_and_validate_values", False), short_line_action=lookup("short_line_action", ValidationAction.EXCLUDE), skip_header_record=lookup("skip_header_recordb", False), tail_count=lookup("tail_count", None), truncate_long_lines=lookup("truncate_long_lines", False), unsafe_column_name_action=lookup("unsafe_column_name_action", ValidationAction.REPORT), 
whitespace_line_action=lookup("whitespace_line_action", ValidationAction.EXCLUDE), ) # Build the value parsing option structure. @classmethod def from_args(cls, args: Namespace, who: str = "", mode: typing.Optional[KgtkReaderMode] = None, fallback: bool = False, )->'KgtkReaderOptions': return cls.from_dict(vars(args), who=who, mode=mode, fallback=fallback) def show(self, who: str="", out: typing.TextIO=sys.stderr): prefix: str = "--" if len(who) == 0 else "--" + who + "-" print("%smode=%s" % (prefix, self.mode.name), file=out) print("%scolumn-separator=%s" % (prefix, repr(self.column_separator)), file=out) if self.force_column_names is not None: print("%sforce-column-names=%s" % (prefix, " ".join(self.force_column_names)), file=out) print("%sskip-header-record=%s" % (prefix, str(self.skip_header_record)), file=out) print("%serror-limit=%s" % (prefix, str(self.error_limit)), file=out) print("%srepair-and-validate-lines=%s" % (prefix, str(self.repair_and_validate_lines)), file=out) print("%srepair-and-validate-values=%s" % (prefix, str(self.repair_and_validate_values)), file=out) print("%sempty-line-action=%s" % (prefix, self.empty_line_action.name), file=out) print("%scomment-line-action=%s" % (prefix, self.comment_line_action.name), file=out) print("%swhitespace-line-action=%s" % (prefix, self.whitespace_line_action.name), file=out) print("%sblank-required-field-line-action=%s" % (prefix, self.blank_required_field_line_action.name), file=out) print("%sshort-line-action=%s" % (prefix, self.short_line_action.name), file=out) print("%slong-line-action=%s" % (prefix, self.long_line_action.name), file=out) print("%sheader-error-action=%s" % (prefix, self.header_error_action.name), file=out) print("%sunsafe-column-name-action=%s" % (prefix, self.unsafe_column_name_action.name), file=out) print("%sinvalid-value-action=%s" % (prefix, self.invalid_value_action.name), file=out) print("%sinitial-skip-count=%s" % (prefix, str(self.initial_skip_count)), file=out) 
print("%severy-nth-record=%s" % (prefix, str(self.every_nth_record)), file=out) if self.record_limit is not None: print("%srecord-limit=%s" % (prefix, str(self.record_limit)), file=out) if self.tail_count is not None: print("%stail-count=%s" % (prefix, str(self.tail_count)), file=out) print("%sinitial-skip-count=%s" % (prefix, str(self.initial_skip_count)), file=out) print("%sprohibited-list-action=%s" % (prefix, self.prohibited_list_action.name), file=out) print("%sfill-short-lines=%s" % (prefix, str(self.fill_short_lines)), file=out) print("%struncate-long-lines=%s" % (prefix, str(self.truncate_long_lines)), file=out) if self.compression_type is not None: print("%scompression-type=%s" % (prefix, str(self.compression_type)), file=out) print("%sgzip-in-parallel=%s" % (prefix, str(self.gzip_in_parallel)), file=out) print("%sgzip-queue-size=%s" % (prefix, str(self.gzip_queue_size)), file=out) DEFAULT_KGTK_READER_OPTIONS: KgtkReaderOptions = KgtkReaderOptions() @attr.s(slots=True, frozen=False) class KgtkReader(KgtkBase, ClosableIter[typing.List[str]]): file_path: typing.Optional[Path] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Path))) source: ClosableIter[str] = attr.ib() # Todo: validate # TODO: Fix this validator: # options: KgtkReaderOptions = attr.ib(validator=attr.validators.instance_of(KgtkReaderOptions)) options: KgtkReaderOptions = attr.ib() value_options: KgtkValueOptions = attr.ib(validator=attr.validators.instance_of(KgtkValueOptions)) column_names: typing.List[str] = attr.ib(validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str), iterable_validator=attr.validators.instance_of(list))) # For convenience, the count of columns. This is the same as len(column_names). 
column_count: int = attr.ib(validator=attr.validators.instance_of(int)) column_name_map: typing.Mapping[str, int] = attr.ib(validator=attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(int))) # The actual mode used. # # TODO: fix the validator. # mode: KgtkReaderMode = attr.ib(validator=attr.validators.instance_of(KgtkReaderMode), default=KgtkReaderMode.NONE) mode: KgtkReaderMode = attr.ib(default=KgtkReaderMode.NONE) # The index of the mandatory/aliased columns. -1 means missing: node1_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file label_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file node2_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file id_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # node file data_lines_read: int = attr.ib(validator=attr.validators.instance_of(int), default=0) data_lines_skipped: int = attr.ib(validator=attr.validators.instance_of(int), default=0) data_lines_passed: int = attr.ib(validator=attr.validators.instance_of(int), default=0) data_lines_ignored: int = attr.ib(validator=attr.validators.instance_of(int), default=0) data_errors_reported: int = attr.ib(validator=attr.validators.instance_of(int), default=0) # Is this an edge file or a node file? 
    is_edge_file: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    is_node_file: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    # Feedback and error output:
    error_file: typing.TextIO = attr.ib(default=sys.stderr)
    verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    very_verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    @classmethod
    def _default_options(
            cls,
            options: typing.Optional[KgtkReaderOptions] = None,
            value_options: typing.Optional[KgtkValueOptions] = None,
    )->typing.Tuple[KgtkReaderOptions, KgtkValueOptions]:
        """Substitute the module-level default option objects for any that were not supplied."""
        # Supply the default reader and value options:
        if options is None:
            options = DEFAULT_KGTK_READER_OPTIONS
        if value_options is None:
            value_options = DEFAULT_KGTK_VALUE_OPTIONS

        return (options, value_options)

    @classmethod
    def open(cls,
             file_path: typing.Optional[Path],
             who: str = "input",
             error_file: typing.TextIO = sys.stderr,
             mode: typing.Optional[KgtkReaderMode] = None,
             options: typing.Optional[KgtkReaderOptions] = None,
             value_options: typing.Optional[KgtkValueOptions] = None,
             verbose: bool = False,
             very_verbose: bool = False)->"KgtkReader":
        """Open a KGTK file (or stdin when file_path is None/"-"), read its header,
        classify it as an edge or node file, and return an appropriate reader
        (EdgeReader, NodeReader, or plain KgtkReader)."""
        # Supply the default reader and value options:
        (options, value_options) = cls._default_options(options, value_options)

        source: ClosableIter[str] = cls._openfile(file_path, options=options, error_file=error_file, verbose=verbose)

        # Read the kgtk file header and split it into column names.  We get the
        # header back, too, for use in debugging and error messages.
        header: str
        column_names: typing.List[str]
        (header, column_names) = cls._build_column_names(source, options, error_file=error_file, verbose=verbose)

        # Check for unsafe column names.
        cls.check_column_names(column_names,
                               header_line=header,
                               who=who,
                               error_action=options.unsafe_column_name_action,
                               error_file=error_file)

        # Build a map from column name to column index.
        column_name_map: typing.Mapping[str, int] = cls.build_column_name_map(column_names,
                                                                              header_line=header,
                                                                              who=who,
                                                                              error_action=options.header_error_action,
                                                                              error_file=error_file)

        # Should we automatically determine if this is an edge file or a node file?
        if mode is None:
            mode = options.mode

        is_edge_file: bool = False
        is_node_file: bool = False
        if mode is KgtkReaderMode.AUTO:
            # If we have a node1 (or alias) column, then this must be an edge file. Otherwise, assume it is a node file.
            node1_idx: int = cls.get_column_idx(cls.NODE1_COLUMN_NAMES,
                                                column_name_map,
                                                header_line=header,
                                                who=who,
                                                error_action=options.header_error_action,
                                                error_file=error_file,
                                                is_optional=True)
            if node1_idx >= 0:
                is_edge_file = True
                is_node_file = False
                if verbose:
                    print("%s column found, this is a KGTK edge file" % column_names[node1_idx], file=error_file, flush=True)
            else:
                is_edge_file = False
                is_node_file = True
                if verbose:
                    print("node1 column not found, assuming this is a KGTK node file", file=error_file, flush=True)
        elif mode is KgtkReaderMode.EDGE:
            is_edge_file = True
        elif mode is KgtkReaderMode.NODE:
            is_node_file = True
        elif mode is KgtkReaderMode.NONE:
            pass

        # Get the indices of the special columns.
        node1_column_idx: int
        label_column_idx: int
        node2_column_idx: int
        id_column_idx: int
        (node1_column_idx, label_column_idx, node2_column_idx, id_column_idx) = cls.get_special_columns(column_name_map,
                                                                                                       header_line=header,
                                                                                                       who=who,
                                                                                                       error_action=options.header_error_action,
                                                                                                       error_file=error_file,
                                                                                                       is_edge_file=is_edge_file,
                                                                                                       is_node_file=is_node_file)
        if verbose:
            print("KgtkReader: Special columns: node1=%d label=%d node2=%d id=%d" % (node1_column_idx,
                                                                                     label_column_idx,
                                                                                     node2_column_idx,
                                                                                     id_column_idx), file=error_file, flush=True)

        if is_edge_file:
            # We'll instantiate an EdgeReader, which is a subclass of KgtkReader.
            # The EdgeReader import is deferred to avoid circular imports.
            from kgtk.io.edgereader import EdgeReader
            if verbose:
                print("KgtkReader: Reading an edge file.", file=error_file, flush=True)
            # Rebind cls so the constructor call below builds the subclass.
            cls = EdgeReader
        elif is_node_file:
            # The NodeReader import is deferred to avoid circular imports.
            from kgtk.io.nodereader import NodeReader
            if verbose:
                print("KgtkReader: Reading an node file.", file=error_file, flush=True)
            cls = NodeReader

        return cls(file_path=file_path,
                   source=source,
                   column_names=column_names,
                   column_name_map=column_name_map,
                   column_count=len(column_names),
                   mode=mode,
                   node1_column_idx=node1_column_idx,
                   label_column_idx=label_column_idx,
                   node2_column_idx=node2_column_idx,
                   id_column_idx=id_column_idx,
                   error_file=error_file,
                   options=options,
                   value_options=value_options,
                   is_edge_file=is_edge_file,
                   is_node_file=is_node_file,
                   verbose=verbose,
                   very_verbose=very_verbose,
        )

    @classmethod
    def _open_compressed_file(cls,
                              compression_type: str,
                              file_name: str,
                              file_or_path: typing.Union[Path, typing.TextIO],
                              who: str,
                              error_file: typing.TextIO,
                              verbose: bool)->typing.TextIO:
        """Open file_or_path for text reading through the named decompressor.

        Raises ValueError for an unrecognized compression_type.
        """
        # TODO: find a better way to coerce typing.IO[Any] to typing.TextIO
        if compression_type in [".gz", "gz"]:
            if verbose:
                print("%s: reading gzip %s" % (who, file_name), file=error_file, flush=True)
            return gzip.open(file_or_path, mode="rt") # type: ignore
        elif compression_type in [".bz2", "bz2"]:
            if verbose:
                print("%s: reading bz2 %s" % (who, file_name), file=error_file, flush=True)
            return bz2.open(file_or_path, mode="rt") # type: ignore
        elif compression_type in [".xz", "xz"]:
            if verbose:
                print("%s: reading lzma %s" % (who, file_name), file=error_file, flush=True)
            return lzma.open(file_or_path, mode="rt") # type: ignore
        elif compression_type in [".lz4", "lz4"]:
            if verbose:
                print("%s: reading lz4 %s" % (who, file_name), file=error_file, flush=True)
            return lz4.frame.open(file_or_path, mode="rt") # type: ignore
        else:
            # TODO: throw a better exception.
            raise ValueError("%s: Unexpected compression_type '%s'" % (who, compression_type))

    @classmethod
    def _openfile(cls, file_path: typing.Optional[Path],
                  options: KgtkReaderOptions,
                  error_file: typing.TextIO,
                  verbose: bool)->ClosableIter[str]:
        """Open the input source as a closable line iterator.

        None or "-" reads stdin.  Compression is selected by options.compression_type
        or, failing that, by the file suffix.  When options.gzip_in_parallel is set,
        decompression runs in a background GunzipProcess.
        """
        who: str = cls.__name__
        if file_path is None or str(file_path) == "-":
            if options.compression_type is not None and len(options.compression_type) > 0:
                return ClosableIterTextIOWrapper(cls._open_compressed_file(options.compression_type, "-", sys.stdin, who, error_file, verbose))
            else:
                if verbose:
                    print("%s: reading stdin" % who, file=error_file, flush=True)
                return ClosableIterTextIOWrapper(sys.stdin)

        if verbose:
            print("%s: File_path.suffix: %s" % (who, file_path.suffix), file=error_file, flush=True)

        gzip_file: typing.TextIO
        if options.compression_type is not None and len(options.compression_type) > 0:
            # An explicit compression type overrides the file suffix.
            gzip_file = cls._open_compressed_file(options.compression_type, str(file_path), file_path, who, error_file, verbose)
        elif file_path.suffix in [".bz2", ".gz", ".lz4", ".xz"]:
            gzip_file = cls._open_compressed_file(file_path.suffix, str(file_path), file_path, who, error_file, verbose)
        else:
            if verbose:
                print("%s: reading file %s" % (who, str(file_path)), file=error_file, flush=True)
            return ClosableIterTextIOWrapper(open(file_path, "r"))

        if options.gzip_in_parallel:
            gzip_thread: GunzipProcess = GunzipProcess(gzip_file, Queue(options.gzip_queue_size))
            gzip_thread.start()
            return gzip_thread
        else:
            return ClosableIterTextIOWrapper(gzip_file)

    @classmethod
    def _build_column_names(cls,
                            source: ClosableIter[str],
                            options: KgtkReaderOptions,
                            error_file: typing.TextIO,
                            verbose: bool = False,
                            )->typing.Tuple[str, typing.List[str]]:
        """Return (header_line, column_names), reading the header from the source
        unless options.force_column_names overrides it."""
        column_names: typing.List[str]
        if options.force_column_names is None:
            # Read the column names from the first line, stripping end-of-line characters.
            #
            # TODO: if the read fails, throw a more useful exception with the line number.
try: header: str = next(source).rstrip("\r\n") except StopIteration: raise ValueError("No header line in file") if verbose: print("header: %s" % header, file=error_file, flush=True) # Split the first line into column names. return header, header.split(options.column_separator) else: # Skip the first record to override the column names in the file. # Do not skip the first record if the file does not hae a header record. if options.skip_header_record: try: next(source) except StopIteration: raise ValueError("No header line to skip") # Use the forced column names. return options.column_separator.join(options.force_column_names), options.force_column_names def close(self): self.source.close() def exclude_line(self, action: ValidationAction, msg: str, line: str)->bool: result: bool if action == ValidationAction.PASS: return False # Silently pass the line through elif action == ValidationAction.REPORT: result= False # Report the issue then pass the line. elif action == ValidationAction.EXCLUDE: return True # Silently exclude the line elif action == ValidationAction.COMPLAIN: result = True # Report the issue then exclude the line. elif action == ValidationAction.ERROR: # Immediately raise an exception. raise ValueError("In input data line %d, %s: %s" % (self.data_lines_read, msg, line)) elif action == ValidationAction.EXIT: print("Data line %d:\n%s\n%s" % (self.data_lines_read, line, msg), file=self.error_file, flush=True) sys.exit(1) # print("In input data line %d, %s: %s" % (self.data_lines_read, msg, line), file=self.error_file, flush=True) print("Data line %d:\n%s\n%s" % (self.data_lines_read, line, msg), file=self.error_file, flush=True) self.data_errors_reported += 1 if self.options.error_limit > 0 and self.data_errors_reported >= self.options.error_limit: raise ValueError("Too many data errors, exiting.") return result # Get the next edge values as a list of strings. 
    def nextrow(self)-> typing.List[str]:
        """Return the next data row as a list of strings, applying record
        sampling, line-level repair/validation, and value-level
        repair/validation per self.options.  Raises StopIteration at end of
        input or at the record limit."""
        row: typing.List[str]

        repair_and_validate_lines: bool = self.options.repair_and_validate_lines
        repair_and_validate_values: bool = self.options.repair_and_validate_values

        # Compute the initial skip count
        skip_count: int = self.options.initial_skip_count
        if self.options.record_limit is not None and self.options.tail_count is not None:
            # Compute the tail count.
            tail_skip_count: int = self.options.record_limit - self.options.tail_count
            if tail_skip_count > skip_count:
                skip_count = tail_skip_count # Take the larger skip count.

        # This loop accomodates lines that are ignored.
        while (True):
            # Has a record limit been specified and have we reached it?
            if self.options.record_limit is not None:
                if self.data_lines_read >= self.options.record_limit:
                    # Close the source and stop the iteration.
                    self.source.close() # Do we need to guard against repeating this call?
                    raise StopIteration

            # Read a line from the source
            line: str
            try:
                line = next(self.source) # Will throw StopIteration
            except StopIteration as e:
                # Close the input file!
                #
                # TODO: implement a close() routine and/or whatever it takes to support "with".
                self.source.close() # Do we need to guard against repeating this call?
                raise e

            # Count the data line read.
            self.data_lines_read += 1

            # Data sampling:
            if self.data_lines_read <= skip_count:
                self.data_lines_skipped += 1
                continue
            if self.options.every_nth_record > 1:
                if self.data_lines_read % self.options.every_nth_record != 0:
                    self.data_lines_skipped += 1
                    continue

            # Strip the end-of-line characters:
            line = line.rstrip("\r\n")

            if repair_and_validate_lines:
                # TODO: Use a sepearate option to control this.
                if self.very_verbose:
                    print("'%s'" % line, file=self.error_file, flush=True)

                # Ignore empty lines.
                if self.options.empty_line_action != ValidationAction.PASS and len(line) == 0:
                    if self.exclude_line(self.options.empty_line_action, "saw an empty line", line):
                        continue

                # Ignore comment lines:
                # NOTE(review): line[0] would raise IndexError for an empty line when
                # empty_line_action is PASS — confirm empty lines cannot reach here.
                if self.options.comment_line_action != ValidationAction.PASS and line[0] == self.COMMENT_INDICATOR:
                    if self.exclude_line(self.options.comment_line_action, "saw a comment line", line):
                        continue

                # Ignore whitespace lines
                if self.options.whitespace_line_action != ValidationAction.PASS and line.isspace():
                    if self.exclude_line(self.options.whitespace_line_action, "saw a whitespace line", line):
                        continue

            row = line.split(self.options.column_separator)

            if repair_and_validate_lines:
                # Optionally fill missing trailing columns with empty row:
                if self.options.fill_short_lines and len(row) < self.column_count:
                    while len(row) < self.column_count:
                        row.append("")

                # Optionally remove extra trailing columns:
                if self.options.truncate_long_lines and len(row) > self.column_count:
                    row = row[:self.column_count]

                # Optionally validate that the line contained the right number of columns:
                #
                # When we report line numbers in error messages, line 1 is the first line after the header line.
if self.options.short_line_action != ValidationAction.PASS and len(row) < self.column_count: if self.exclude_line(self.options.short_line_action, "Required %d columns, saw %d: '%s'" % (self.column_count, len(row), line), line): continue if self.options.long_line_action != ValidationAction.PASS and len(row) > self.column_count: if self.exclude_line(self.options.long_line_action, "Required %d columns, saw %d (%d extra): '%s'" % (self.column_count, len(row), len(row) - self.column_count, line), line): continue if self._ignore_if_blank_fields(row, line): continue if repair_and_validate_values: if self.options.invalid_value_action != ValidationAction.PASS: # TODO: find a way to optionally cache the KgtkValue objects # so we don't have to create them a second time in the conversion if self._ignore_invalid_values(row, line): continue if self.options.prohibited_list_action != ValidationAction.PASS: if self._ignore_prohibited_lists(row, line): continue self.data_lines_passed += 1 return row def __iter__(self)->typing.Iterator[typing.List[str]]: return self def __next__(self)-> typing.List[str]: return self.nextrow() def concise_rows(self)->typing.Iterator[typing.List[typing.Optional[str]]]: while True: try: row: typing.List[str] = self.nextrow() except StopIteration: return results: typing.List[typing.Optional[str]] = [ ] field: str for field in row: if len(field) == 0: results.append(None) else: results.append(field) yield results def to_kgtk_values(self, row: typing.List[str], validate: bool = False, parse_fields: bool = False)->typing.List[KgtkValue]: results: typing.List[KgtkValue] = [ ] field: str for field in row: kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields) if validate: kv.validate() results.append(kv) return results def kgtk_values(self, validate: bool = False, parse_fields: bool = False )->typing.Iterator[typing.List[KgtkValue]]: while True: try: yield self.to_kgtk_values(self.nextrow(), validate=validate, parse_fields=parse_fields) 
except StopIteration: return def to_concise_kgtk_values(self, row: typing.List[str], validate: bool = False, parse_fields: bool = False )->typing.List[typing.Optional[KgtkValue]]: results: typing.List[typing.Optional[KgtkValue]] = [ ] field: str for field in row: if len(field) == 0: results.append(None) else: kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields) if validate: kv.validate() results.append(kv) return results def concise_kgtk_values(self, validate: bool = False, parse_fields: bool = False )->typing.Iterator[typing.List[typing.Optional[KgtkValue]]]: while True: try: yield self.to_concise_kgtk_values(self.nextrow(), validate=validate) except StopIteration: return def to_dict(self, row: typing.List[str], concise: bool=False )->typing.Mapping[str, str]: results: typing.MutableMapping[str, str] = { } field: str idx: int = 0 # efficiency gain. if concise: for field in row: if len(field) > 0: results[self.column_names[idx]] = field idx += 1 else: for field in row: results[self.column_names[idx]] = field idx += 1 return results def dicts(self, concise: bool=False )->typing.Iterator[typing.Mapping[str, str]]: while True: try: yield self.to_dict(self.nextrow(), concise=concise) except StopIteration: return def to_kgtk_value_dict(self, row: typing.List[str], validate: bool=False, parse_fields: bool=False, concise: bool=False )->typing.Mapping[str, KgtkValue]: results: typing.MutableMapping[str, KgtkValue] = { } idx: int = 0 field: str for field in row: if concise and len(field) == 0: pass # Skip the empty field. 
else: kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields) if validate: kv.validate() results[self.column_names[idx]] = kv idx += 1 return results def kgtk_value_dicts(self, validate: bool=False, parse_fields: bool=False, concise: bool=False )->typing.Iterator[typing.Mapping[str, KgtkValue]]: while True: try: yield self.to_kgtk_value_dict(self.nextrow(), validate=validate, parse_fields=parse_fields, concise=concise) except StopIteration: return def _ignore_invalid_values(self, row: typing.List[str], line: str)->bool: problems: typing.List[str] = [ ] # Build a list of problems. idx: int item: str for idx, item in enumerate(row): if len(item) > 0: # Optimize the common case of empty columns. kv: KgtkValue = KgtkValue(item, options=self.value_options) if not kv.is_valid(): problems.append("col %d (%s) value '%s'is an %s" % (idx, self.column_names[idx], item, kv.describe())) if kv.repaired: # If this value was repaired, update the item in the row. # # Warning: We expect this change to be seen by the caller. row[idx] = kv.value if len(problems) == 0: return False return self.exclude_line(self.options.invalid_value_action, "\n".join(problems), line) def _ignore_prohibited_list(self, idx: int, row: typing.List[str], line: str, problems: typing.List[str], ): if idx < 0: return item: str = row[idx] if KgtkFormat.LIST_SEPARATOR not in item: return if len(KgtkValue.split_list(item)) == 1: return problems.append("col %d (%s) value '%s'is a prohibited list" % (idx, self.column_names[idx], item)) def _ignore_prohibited_lists(self, row: typing.List[str], line: str)->bool: problems: typing.List[str] = [ ] # Build a list of problems. 
self._ignore_prohibited_list(self.node1_column_idx, row, line, problems) self._ignore_prohibited_list(self.label_column_idx, row, line, problems) self._ignore_prohibited_list(self.node2_column_idx, row, line, problems) if len(problems) == 0: return False return self.exclude_line(self.options.invalid_value_action, "\n".join(problems), line) # May be overridden def _ignore_if_blank_fields(self, values: typing.List[str], line: str)->bool: return False # May be overridden def _skip_reserved_fields(self, column_name)->bool: return False def additional_column_names(self)->typing.List[str]: if self.is_edge_file: return KgtkBase.additional_edge_columns(self.column_names) elif self.is_node_file: return KgtkBase.additional_node_columns(self.column_names) else: # TODO: throw a better exception. raise ValueError("KgtkReader: Unknown Kgtk file type.") def merge_columns(self, additional_columns: typing.List[str])->typing.List[str]: merged_columns: typing.List[str] = self.column_names.copy() column_name: str for column_name in additional_columns: if column_name in self.column_name_map: continue merged_columns.append(column_name) return merged_columns def get_node1_column_index(self, column_name: typing.Optional[str] = None)->int: if column_name is None or len(column_name) == 0: return self.node1_column_idx else: return self.column_name_map.get(column_name, -1) def get_node1_canonical_name(self, column_name: typing.Optional[str]=None)->str: if column_name is not None and len(column_name) > 0: return column_name else: return KgtkFormat.NODE1 def get_node1_column_actual_name(self, column_name: typing.Optional[str]=None)->str: idx: int = self.get_node1_column_index(column_name) if idx >= 0: return self.column_names[idx] else: return "" def get_label_column_index(self, column_name: typing.Optional[str] = None)->int: if column_name is None or len(column_name) == 0: return self.label_column_idx else: return self.column_name_map.get(column_name, -1) def get_label_canonical_name(self, 
                                 column_name: typing.Optional[str]=None)->str:
        """Return the override name when supplied, else the canonical label name."""
        if column_name is not None and len(column_name) > 0:
            return column_name
        else:
            return KgtkFormat.LABEL

    def get_label_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
        """Return the actual header name of the label column, or "" if absent."""
        idx: int = self.get_label_column_index(column_name)
        if idx >= 0:
            return self.column_names[idx]
        else:
            return ""

    def get_node2_column_index(self, column_name: typing.Optional[str] = None)->int:
        """Return the index of the node2 column (or the named override), -1 if absent."""
        if column_name is None or len(column_name) == 0:
            return self.node2_column_idx
        else:
            return self.column_name_map.get(column_name, -1)

    def get_node2_canonical_name(self, column_name: typing.Optional[str]=None)->str:
        """Return the override name when supplied, else the canonical node2 name."""
        if column_name is not None and len(column_name) > 0:
            return column_name
        else:
            return KgtkFormat.NODE2

    def get_node2_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
        """Return the actual header name of the node2 column, or "" if absent."""
        idx: int = self.get_node2_column_index(column_name)
        if idx >= 0:
            return self.column_names[idx]
        else:
            return ""

    def get_id_column_index(self, column_name: typing.Optional[str] = None)->int:
        """Return the index of the id column (or the named override), -1 if absent."""
        if column_name is None or len(column_name) == 0:
            return self.id_column_idx
        else:
            return self.column_name_map.get(column_name, -1)

    def get_id_canonical_name(self, column_name: typing.Optional[str]=None)->str:
        """Return the override name when supplied, else the canonical id name."""
        if column_name is not None and len(column_name) > 0:
            return column_name
        else:
            return KgtkFormat.ID

    def get_id_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
        """Return the actual header name of the id column, or "" if absent."""
        idx: int = self.get_id_column_index(column_name)
        if idx >= 0:
            return self.column_names[idx]
        else:
            return ""

    @classmethod
    def add_debug_arguments(cls, parser: ArgumentParser, expert: bool = False):
        """Add error/feedback command-line arguments to the parser.

        When expert is False, most options are hidden from --help but still work.
        """
        # This helper function makes it easy to suppress options from
        # The help message.  The options are still there, and initialize
        # what they need to initialize.
        def h(msg: str)->str:
            if expert:
                return msg
            else:
                return SUPPRESS

        egroup: _ArgumentGroup = parser.add_argument_group(h("Error and feedback messages"),
                                                           h("Send error messages and feedback to stderr or stdout, " +
                                                             "control the amount of feedback and debugging messages."))

        # Avoid the argparse bug that prevents these two arguments from having
        # their help messages suppressed directly.
        #
        # TODO: Is there a better fix?
        #
        # TODO: replace --errors-to-stdout and --errors-to-stderr with
        # --errors-to=stdout and --errors-to=stderr, using either an enum
        # or choices.  That will avoid the argparse bug, too.
        if expert:
            errors_to = egroup.add_mutually_exclusive_group()
            errors_to.add_argument(      "--errors-to-stdout", dest="errors_to_stdout",
                                         help="Send errors to stdout instead of stderr",
                                         action="store_true")

            errors_to.add_argument(      "--errors-to-stderr", dest="errors_to_stderr",
                                         help="Send errors to stderr instead of stdout",
                                         action="store_true")
        else:
            egroup.add_argument(      "--errors-to-stderr", dest="errors_to_stderr",
                                      help=h("Send errors to stderr instead of stdout"),
                                      action="store_true")

            egroup.add_argument(      "--errors-to-stdout", dest="errors_to_stdout",
                                      help=h("Send errors to stdout instead of stderr"),
                                      action="store_true")

        egroup.add_argument(      "--show-options", dest="show_options",
                                  help=h("Print the options selected (default=%(default)s)."),
                                  action='store_true')

        egroup.add_argument("-v", "--verbose", dest="verbose",
                            help="Print additional progress messages (default=%(default)s).",
                            action='store_true')

        egroup.add_argument(      "--very-verbose", dest="very_verbose",
                                  help=h("Print additional progress messages (default=%(default)s)."),
                                  action='store_true')


def main():
    """Test the KGTK file reader from the command line."""
    # The EdgeReader import is deferred to avoid circular imports.
    from kgtk.io.edgereader import EdgeReader
    # The NodeReader import is deferred to avoid circular imports.
from kgtk.io.nodereader import NodeReader parser = ArgumentParser() parser.add_argument(dest="kgtk_file", help="The KGTK file to read", type=Path, nargs="?") KgtkReader.add_debug_arguments(parser, expert=True) parser.add_argument( "--test", dest="test_method", help="The test to perform (default=%(default)s).", choices=["rows", "concise-rows", "kgtk-values", "concise-kgtk-values", "dicts", "concise-dicts", "kgtk-value-dicts", "concise-kgtk-value-dicts"], default="rows") parser.add_argument( "--test-validate", dest="test_validate", help="Validate KgtkValue objects in test (default=%(default)s).", type=optional_bool, nargs='?', const=True, default=False) KgtkReaderOptions.add_arguments(parser, mode_options=True, validate_by_default=True, expert=True) KgtkValueOptions.add_arguments(parser, expert=True) args: Namespace = parser.parse_args() error_file: typing.TextIO = sys.stdout if args.errors_to_stdout else sys.stderr # Build the option structures. reader_options: KgtkReaderOptions = KgtkReaderOptions.from_args(args) value_options: KgtkValueOptions = KgtkValueOptions.from_args(args) if args.show_options: print("--test=%s" % str(args.test), file=error_file) print("--test-validate=%s" % str(args.test_validate), file=error_file) reader_options.show(out=error_file) value_options.show(out=error_file) print("=======", file=error_file, flush=True) kr: KgtkReader = KgtkReader.open(args.kgtk_file, error_file = error_file, options=reader_options, value_options=value_options, verbose=args.verbose, very_verbose=args.very_verbose) line_count: int = 0 row: typing.List[str] kgtk_values: typing.List[KgtkValue] concise_kgtk_values: typing.List[typing.Optional[KgtkValue]] dict_row: typing.Mapping[str, str] kgtk_value_dict: typing.Mapping[str, str] if args.test_method == "rows": if args.verbose: print("Testing iterating over rows.", file=error_file, flush=True) for row in kr: line_count += 1 elif args.test_method == "concise-rows": if args.verbose: print("Testing iterating over concise 
rows.", file=error_file, flush=True) for row in kr.concise_rows(): line_count += 1 elif args.test_method == "kgtk-values": if args.verbose: print("Testing iterating over KgtkValue rows.", file=error_file, flush=True) for kgtk_values in kr.kgtk_values(validate=args.test_validate): line_count += 1 elif args.test_method == "concise-kgtk-values": if args.verbose: print("Testing iterating over concise KgtkValue rows.", file=error_file, flush=True) for kgtk_values in kr.concise_kgtk_values(validate=args.test_validate): line_count += 1 elif args.test_method == "dicts": if args.verbose: print("Testing iterating over dicts.", file=error_file, flush=True) for dict_row in kr.dicts(): line_count += 1 elif args.test_method == "concise-dicts": if args.verbose: print("Testing iterating over concise dicts.", file=error_file, flush=True) for dict_row in kr.dicts(concise=True): line_count += 1 elif args.test_method == "kgtk-value-dicts": if args.verbose: print("Testing iterating over KgtkValue dicts.", file=error_file, flush=True) for kgtk_value_dict in kr.kgtk_value_dicts(validate=args.test_validate): line_count += 1 elif args.test_method == "concise-kgtk-value-dicts": if args.verbose: print("Testing iterating over concise KgtkValue dicts.", file=error_file, flush=True) for kgtk_value_dict in kr.kgtk_value_dicts(concise=True, validate=args.test_validate): line_count += 1 print("Read %d lines" % line_count, file=error_file, flush=True) if __name__ == "__main__": main()
true
true
1c45e033fc674af02e4beaed1d9dd8f01d305f9a
686
py
Python
app/core/management/commands/wait_for_db.py
amir-rz/recipe-app-api
69f8c2ee801cd4bf909979bd246b8fe1bf9e2d60
[ "MIT" ]
null
null
null
app/core/management/commands/wait_for_db.py
amir-rz/recipe-app-api
69f8c2ee801cd4bf909979bd246b8fe1bf9e2d60
[ "MIT" ]
null
null
null
app/core/management/commands/wait_for_db.py
amir-rz/recipe-app-api
69f8c2ee801cd4bf909979bd246b8fe1bf9e2d60
[ "MIT" ]
null
null
null
import time from django.db import connections from django.db.utils import OperationalError from django.core.management.base import BaseCommand class Command(BaseCommand): """ Django command to pause excuation until database is available """ def handle(self, *args, **options): self.stdout.write("Waiting for database...") db_conn = None while not db_conn: try: db_conn = connections["default"] except OperationalError: self.stdout.write("Database unavailable, waiting 1 second...") time.sleep(1) self.stdout.write(self.style.SUCCESS("Database available!")) # noqa: W391
34.3
82
0.650146
import time from django.db import connections from django.db.utils import OperationalError from django.core.management.base import BaseCommand class Command(BaseCommand): def handle(self, *args, **options): self.stdout.write("Waiting for database...") db_conn = None while not db_conn: try: db_conn = connections["default"] except OperationalError: self.stdout.write("Database unavailable, waiting 1 second...") time.sleep(1) self.stdout.write(self.style.SUCCESS("Database available!"))
true
true
1c45e09129bfd3cd066e0bd3ca94b7df369924d0
962
py
Python
atriage/collectors/flatdir.py
Ayrx/atriage
6e928da0d673260e61e089f69cb56555c7d9cdf6
[ "Apache-2.0" ]
11
2017-12-17T12:18:56.000Z
2021-05-10T23:11:29.000Z
atriage/collectors/flatdir.py
Ayrx/atriage
6e928da0d673260e61e089f69cb56555c7d9cdf6
[ "Apache-2.0" ]
7
2018-10-01T08:46:24.000Z
2021-06-01T21:48:44.000Z
atriage/collectors/flatdir.py
Ayrx/atriage
6e928da0d673260e61e089f69cb56555c7d9cdf6
[ "Apache-2.0" ]
3
2017-12-17T12:19:00.000Z
2019-03-25T09:31:52.000Z
from atriage.collectors.exceptions import NoopException from atriage.collectors.interface import CollectorInterface from pathlib import Path import click class FlatDirCollector(object): name = "flat-dir-collector" def __init__(self, results): self._results = results def parse_directory(self, directory): click.echo("Reading {}...".format(directory)) new = self._read_directory(directory) old = set([i[1] for i in self._results.all_crashes]) diff = new - old if len(diff) != 0: click.echo("Adding {} crashes.".format(len(diff))) self._results.save_crashes(diff) def gather_all_samples(self, directory): raise NoopException def _read_directory(self, directory): crashes = set() p = Path(directory) for crash in p.iterdir(): crashes.add(str(crash)) return crashes CollectorInterface.register(FlatDirCollector)
24.05
62
0.660083
from atriage.collectors.exceptions import NoopException from atriage.collectors.interface import CollectorInterface from pathlib import Path import click class FlatDirCollector(object): name = "flat-dir-collector" def __init__(self, results): self._results = results def parse_directory(self, directory): click.echo("Reading {}...".format(directory)) new = self._read_directory(directory) old = set([i[1] for i in self._results.all_crashes]) diff = new - old if len(diff) != 0: click.echo("Adding {} crashes.".format(len(diff))) self._results.save_crashes(diff) def gather_all_samples(self, directory): raise NoopException def _read_directory(self, directory): crashes = set() p = Path(directory) for crash in p.iterdir(): crashes.add(str(crash)) return crashes CollectorInterface.register(FlatDirCollector)
true
true
1c45e18204e4eaacee40b946650e58bb8faf467c
2,264
py
Python
email/sphinxcontrib/email.py
Tommy1969/sphinx-contrib
d479ece0fe6c2f33bbebcc52677035d5003b7b35
[ "BSD-2-Clause" ]
14
2016-02-22T12:06:54.000Z
2021-01-05T07:01:43.000Z
email/sphinxcontrib/email.py
SuperKogito/sphinx-contrib
3b643bffb90a27ae378717ae6335873a0e73cf9d
[ "BSD-2-Clause" ]
8
2015-03-06T13:46:49.000Z
2019-10-09T08:53:14.000Z
email/sphinxcontrib/email.py
SuperKogito/sphinx-contrib
3b643bffb90a27ae378717ae6335873a0e73cf9d
[ "BSD-2-Clause" ]
16
2015-05-25T02:51:05.000Z
2020-01-17T05:49:47.000Z
# E-mail obfuscation role for Sphinx. from docutils import nodes # The obfuscation code was taken from # # http://pypi.python.org/pypi/bud.nospam # # and was was released by Kevin Teague <kevin at bud ca> under # a BSD license. import re try: maketrans = ''.maketrans except AttributeError: # fallback for Python 2 from string import maketrans rot_13_trans = maketrans( 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', 'NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm' ) def rot_13_encrypt(line): """Rotate 13 encryption. """ line = line.translate(rot_13_trans) line = re.sub('(?=[\\"])', r'\\', line) line = re.sub('\n', r'\n', line) line = re.sub('@', r'\\100', line) line = re.sub('\.', r'\\056', line) line = re.sub('/', r'\\057', line) return line def js_obfuscated_text(text): """ROT 13 encryption with embedded in Javascript code to decrypt in the browser. """ return """<script type="text/javascript">document.write( "%s".replace(/[a-zA-Z]/g, function(c){ return String.fromCharCode( (c<="Z"?90:122)>=(c=c.charCodeAt(0)+13)?c:c-26);})); </script>""" % rot_13_encrypt(text) def js_obfuscated_mailto(email, displayname=None): """ROT 13 encryption within an Anchor tag w/ a mailto: attribute """ if not displayname: displayname = email return js_obfuscated_text("""<a href="mailto:%s">%s</a>""" % ( email, displayname )) # -- end bud.nospam def email_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): """ Role to obfuscate e-mail addresses. """ text = text.decode('utf-8').encode('utf-8') # Handle addresses of the form "Name <name@domain.org>" if '<' in text and '>' in text: name, email = text.split('<') email = email.split('>')[0] elif '(' in text and ')' in text: name, email = text.split('(') email = email.split(')')[0] else: name = text email = name obfuscated = js_obfuscated_mailto(email, displayname=name) node = nodes.raw('', obfuscated, format='html') return [node], [] def setup(app): app.add_role('email', email_role)
27.277108
76
0.605124
from docutils import nodes import re try: maketrans = ''.maketrans except AttributeError: from string import maketrans rot_13_trans = maketrans( 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', 'NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm' ) def rot_13_encrypt(line): line = line.translate(rot_13_trans) line = re.sub('(?=[\\"])', r'\\', line) line = re.sub('\n', r'\n', line) line = re.sub('@', r'\\100', line) line = re.sub('\.', r'\\056', line) line = re.sub('/', r'\\057', line) return line def js_obfuscated_text(text): return """<script type="text/javascript">document.write( "%s".replace(/[a-zA-Z]/g, function(c){ return String.fromCharCode( (c<="Z"?90:122)>=(c=c.charCodeAt(0)+13)?c:c-26);})); </script>""" % rot_13_encrypt(text) def js_obfuscated_mailto(email, displayname=None): if not displayname: displayname = email return js_obfuscated_text("""<a href="mailto:%s">%s</a>""" % ( email, displayname )) # -- end bud.nospam def email_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): text = text.decode('utf-8').encode('utf-8') # Handle addresses of the form "Name <name@domain.org>" if '<' in text and '>' in text: name, email = text.split('<') email = email.split('>')[0] elif '(' in text and ')' in text: name, email = text.split('(') email = email.split(')')[0] else: name = text email = name obfuscated = js_obfuscated_mailto(email, displayname=name) node = nodes.raw('', obfuscated, format='html') return [node], [] def setup(app): app.add_role('email', email_role)
true
true
1c45e1cfb9f789049e129d5f970c555b0f0ffd3c
15,669
py
Python
distributed/stealing.py
dchudz/distributed
591bca00af4fc07d0c5cac5189fc3b08ef8a93cd
[ "BSD-3-Clause" ]
null
null
null
distributed/stealing.py
dchudz/distributed
591bca00af4fc07d0c5cac5189fc3b08ef8a93cd
[ "BSD-3-Clause" ]
null
null
null
distributed/stealing.py
dchudz/distributed
591bca00af4fc07d0c5cac5189fc3b08ef8a93cd
[ "BSD-3-Clause" ]
null
null
null
import logging from collections import defaultdict, deque from math import log2 from time import time from tlz import topk from tornado.ioloop import PeriodicCallback import dask from dask.utils import parse_timedelta from .comm.addressing import get_address_host from .core import CommClosedError from .diagnostics.plugin import SchedulerPlugin from .utils import log_errors LATENCY = 10e-3 logger = logging.getLogger(__name__) LOG_PDB = dask.config.get("distributed.admin.pdb-on-err") class WorkStealing(SchedulerPlugin): def __init__(self, scheduler): self.scheduler = scheduler # { level: { task states } } self.stealable_all = [set() for i in range(15)] # { worker: { level: { task states } } } self.stealable = dict() # { task state: (worker, level) } self.key_stealable = dict() self.cost_multipliers = [1 + 2 ** (i - 6) for i in range(15)] self.cost_multipliers[0] = 1 for worker in scheduler.workers: self.add_worker(worker=worker) callback_time = parse_timedelta( dask.config.get("distributed.scheduler.work-stealing-interval"), default="ms", ) # `callback_time` is in milliseconds pc = PeriodicCallback(callback=self.balance, callback_time=callback_time * 1000) self._pc = pc self.scheduler.periodic_callbacks["stealing"] = pc self.scheduler.plugins.append(self) self.scheduler.extensions["stealing"] = self self.scheduler.events["stealing"] = deque(maxlen=100000) self.count = 0 # { task state: <stealing info dict> } self.in_flight = dict() # { worker state: occupancy } self.in_flight_occupancy = defaultdict(lambda: 0) self.scheduler.stream_handlers["steal-response"] = self.move_task_confirm def log(self, msg): return self.scheduler.log_event("stealing", msg) def add_worker(self, scheduler=None, worker=None): self.stealable[worker] = [set() for i in range(15)] def remove_worker(self, scheduler=None, worker=None): del self.stealable[worker] def teardown(self): self._pc.stop() def transition( self, key, start, finish, compute_start=None, compute_stop=None, *args, **kwargs ): if 
finish == "processing": ts = self.scheduler.tasks[key] self.put_key_in_stealable(ts) elif start == "processing": ts = self.scheduler.tasks[key] self.remove_key_from_stealable(ts) if finish != "memory": self.in_flight.pop(ts, None) def put_key_in_stealable(self, ts): cost_multiplier, level = self.steal_time_ratio(ts) if cost_multiplier is not None: ws = ts.processing_on worker = ws.address self.stealable_all[level].add(ts) self.stealable[worker][level].add(ts) self.key_stealable[ts] = (worker, level) def remove_key_from_stealable(self, ts): result = self.key_stealable.pop(ts, None) if result is None: return worker, level = result try: self.stealable[worker][level].remove(ts) except KeyError: pass try: self.stealable_all[level].remove(ts) except KeyError: pass def steal_time_ratio(self, ts): """The compute to communication time ratio of a key Returns ------- cost_multiplier: The increased cost from moving this task as a factor. For example a result of zero implies a task without dependencies. 
level: The location within a stealable list to place this value """ if not ts.dependencies: # no dependencies fast path return 0, 0 split = ts.prefix.name if split in fast_tasks: return None, None ws = ts.processing_on compute_time = ws.processing[ts] if compute_time < 0.005: # 5ms, just give up return None, None nbytes = ts.get_nbytes_deps() transfer_time = nbytes / self.scheduler.bandwidth + LATENCY cost_multiplier = transfer_time / compute_time if cost_multiplier > 100: return None, None level = int(round(log2(cost_multiplier) + 6)) if level < 1: level = 1 return cost_multiplier, level def move_task_request(self, ts, victim, thief): try: if self.scheduler.validate: if victim is not ts.processing_on and LOG_PDB: import pdb pdb.set_trace() key = ts.key self.remove_key_from_stealable(ts) logger.debug( "Request move %s, %s: %2f -> %s: %2f", key, victim, victim.occupancy, thief, thief.occupancy, ) victim_duration = victim.processing[ts] thief_duration = self.scheduler.get_task_duration( ts ) + self.scheduler.get_comm_cost(ts, thief) self.scheduler.stream_comms[victim.address].send( {"op": "steal-request", "key": key} ) self.in_flight[ts] = { "victim": victim, "thief": thief, "victim_duration": victim_duration, "thief_duration": thief_duration, } self.in_flight_occupancy[victim] -= victim_duration self.in_flight_occupancy[thief] += thief_duration except CommClosedError: logger.info("Worker comm %r closed while stealing: %r", victim, ts) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise async def move_task_confirm(self, key=None, worker=None, state=None): try: try: ts = self.scheduler.tasks[key] except KeyError: logger.debug("Key released between request and confirm: %s", key) return try: d = self.in_flight.pop(ts) except KeyError: return thief = d["thief"] victim = d["victim"] logger.debug( "Confirm move %s, %s -> %s. 
State: %s", key, victim, thief, state ) self.in_flight_occupancy[thief] -= d["thief_duration"] self.in_flight_occupancy[victim] += d["victim_duration"] if not self.in_flight: self.in_flight_occupancy = defaultdict(lambda: 0) if ts.state != "processing" or ts.processing_on is not victim: old_thief = thief.occupancy new_thief = sum(thief.processing.values()) old_victim = victim.occupancy new_victim = sum(victim.processing.values()) thief.occupancy = new_thief victim.occupancy = new_victim self.scheduler.total_occupancy += ( new_thief - old_thief + new_victim - old_victim ) return # One of the pair has left, punt and reschedule if ( thief.address not in self.scheduler.workers or victim.address not in self.scheduler.workers ): self.scheduler.reschedule(key) return # Victim had already started execution, reverse stealing if state in ("memory", "executing", "long-running", None): self.log(("already-computing", key, victim.address, thief.address)) self.scheduler.check_idle_saturated(thief) self.scheduler.check_idle_saturated(victim) # Victim was waiting, has given up task, enact steal elif state in ("waiting", "ready", "constrained"): self.remove_key_from_stealable(ts) ts.processing_on = thief duration = victim.processing.pop(ts) victim.occupancy -= duration self.scheduler.total_occupancy -= duration if not victim.processing: self.scheduler.total_occupancy -= victim.occupancy victim.occupancy = 0 thief.processing[ts] = d["thief_duration"] thief.occupancy += d["thief_duration"] self.scheduler.total_occupancy += d["thief_duration"] self.put_key_in_stealable(ts) try: self.scheduler.send_task_to_worker(thief.address, ts) except CommClosedError: await self.scheduler.remove_worker(thief.address) self.log(("confirm", key, victim.address, thief.address)) else: raise ValueError("Unexpected task state: %s" % state) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise finally: try: self.scheduler.check_idle_saturated(thief) except Exception: pass 
try: self.scheduler.check_idle_saturated(victim) except Exception: pass def balance(self): s = self.scheduler def combined_occupancy(ws): return ws.occupancy + self.in_flight_occupancy[ws] def maybe_move_task(level, ts, sat, idl, duration, cost_multiplier): occ_idl = combined_occupancy(idl) occ_sat = combined_occupancy(sat) if occ_idl + cost_multiplier * duration <= occ_sat - duration / 2: self.move_task_request(ts, sat, idl) log.append( ( start, level, ts.key, duration, sat.address, occ_sat, idl.address, occ_idl, ) ) s.check_idle_saturated(sat, occ=occ_sat) s.check_idle_saturated(idl, occ=occ_idl) with log_errors(): i = 0 idle = s.idle.values() saturated = s.saturated if not idle or len(idle) == len(s.workers): return log = [] start = time() if not s.saturated: saturated = topk(10, s.workers.values(), key=combined_occupancy) saturated = [ ws for ws in saturated if combined_occupancy(ws) > 0.2 and len(ws.processing) > ws.nthreads ] elif len(s.saturated) < 20: saturated = sorted(saturated, key=combined_occupancy, reverse=True) if len(idle) < 20: idle = sorted(idle, key=combined_occupancy) for level, cost_multiplier in enumerate(self.cost_multipliers): if not idle: break for sat in list(saturated): stealable = self.stealable[sat.address][level] if not stealable or not idle: continue for ts in list(stealable): if ts not in self.key_stealable or ts.processing_on is not sat: stealable.discard(ts) continue i += 1 if not idle: break if _has_restrictions(ts): thieves = [ws for ws in idle if _can_steal(ws, ts, sat)] else: thieves = idle if not thieves: break thief = thieves[i % len(thieves)] duration = sat.processing.get(ts) if duration is None: stealable.discard(ts) continue maybe_move_task( level, ts, sat, thief, duration, cost_multiplier ) if self.cost_multipliers[level] < 20: # don't steal from public at cost stealable = self.stealable_all[level] for ts in list(stealable): if not idle: break if ts not in self.key_stealable: stealable.discard(ts) continue sat = 
ts.processing_on if sat is None: stealable.discard(ts) continue if combined_occupancy(sat) < 0.2: continue if len(sat.processing) <= sat.nthreads: continue i += 1 if _has_restrictions(ts): thieves = [ws for ws in idle if _can_steal(ws, ts, sat)] else: thieves = idle if not thieves: continue thief = thieves[i % len(thieves)] duration = sat.processing[ts] maybe_move_task( level, ts, sat, thief, duration, cost_multiplier ) if log: self.log(log) self.count += 1 stop = time() if s.digests: s.digests["steal-duration"].add(stop - start) def restart(self, scheduler): for stealable in self.stealable.values(): for s in stealable: s.clear() for s in self.stealable_all: s.clear() self.key_stealable.clear() def story(self, *keys): keys = set(keys) out = [] for _, L in self.scheduler.get_event("stealing"): if not isinstance(L, list): L = [L] for t in L: if any(x in keys for x in t): out.append(t) return out def _has_restrictions(ts): """Determine whether the given task has restrictions and whether these restrictions are strict. """ return not ts.loose_restrictions and ( ts.host_restrictions or ts.worker_restrictions or ts.resource_restrictions ) def _can_steal(thief, ts, victim): """Determine whether worker ``thief`` can steal task ``ts`` from worker ``victim``. Assumes that `ts` has some restrictions. """ if ( ts.host_restrictions and get_address_host(thief.address) not in ts.host_restrictions ): return False elif ts.worker_restrictions and thief.address not in ts.worker_restrictions: return False if victim.resources is None: return True for resource, value in victim.resources.items(): try: supplied = thief.resources[resource] except KeyError: return False else: if supplied < value: return False return True fast_tasks = {"split-shuffle"}
34.286652
88
0.521029
import logging from collections import defaultdict, deque from math import log2 from time import time from tlz import topk from tornado.ioloop import PeriodicCallback import dask from dask.utils import parse_timedelta from .comm.addressing import get_address_host from .core import CommClosedError from .diagnostics.plugin import SchedulerPlugin from .utils import log_errors LATENCY = 10e-3 logger = logging.getLogger(__name__) LOG_PDB = dask.config.get("distributed.admin.pdb-on-err") class WorkStealing(SchedulerPlugin): def __init__(self, scheduler): self.scheduler = scheduler self.stealable_all = [set() for i in range(15)] self.stealable = dict() self.key_stealable = dict() self.cost_multipliers = [1 + 2 ** (i - 6) for i in range(15)] self.cost_multipliers[0] = 1 for worker in scheduler.workers: self.add_worker(worker=worker) callback_time = parse_timedelta( dask.config.get("distributed.scheduler.work-stealing-interval"), default="ms", ) pc = PeriodicCallback(callback=self.balance, callback_time=callback_time * 1000) self._pc = pc self.scheduler.periodic_callbacks["stealing"] = pc self.scheduler.plugins.append(self) self.scheduler.extensions["stealing"] = self self.scheduler.events["stealing"] = deque(maxlen=100000) self.count = 0 self.in_flight = dict() self.in_flight_occupancy = defaultdict(lambda: 0) self.scheduler.stream_handlers["steal-response"] = self.move_task_confirm def log(self, msg): return self.scheduler.log_event("stealing", msg) def add_worker(self, scheduler=None, worker=None): self.stealable[worker] = [set() for i in range(15)] def remove_worker(self, scheduler=None, worker=None): del self.stealable[worker] def teardown(self): self._pc.stop() def transition( self, key, start, finish, compute_start=None, compute_stop=None, *args, **kwargs ): if finish == "processing": ts = self.scheduler.tasks[key] self.put_key_in_stealable(ts) elif start == "processing": ts = self.scheduler.tasks[key] self.remove_key_from_stealable(ts) if finish != "memory": 
self.in_flight.pop(ts, None) def put_key_in_stealable(self, ts): cost_multiplier, level = self.steal_time_ratio(ts) if cost_multiplier is not None: ws = ts.processing_on worker = ws.address self.stealable_all[level].add(ts) self.stealable[worker][level].add(ts) self.key_stealable[ts] = (worker, level) def remove_key_from_stealable(self, ts): result = self.key_stealable.pop(ts, None) if result is None: return worker, level = result try: self.stealable[worker][level].remove(ts) except KeyError: pass try: self.stealable_all[level].remove(ts) except KeyError: pass def steal_time_ratio(self, ts): if not ts.dependencies: return 0, 0 split = ts.prefix.name if split in fast_tasks: return None, None ws = ts.processing_on compute_time = ws.processing[ts] if compute_time < 0.005: return None, None nbytes = ts.get_nbytes_deps() transfer_time = nbytes / self.scheduler.bandwidth + LATENCY cost_multiplier = transfer_time / compute_time if cost_multiplier > 100: return None, None level = int(round(log2(cost_multiplier) + 6)) if level < 1: level = 1 return cost_multiplier, level def move_task_request(self, ts, victim, thief): try: if self.scheduler.validate: if victim is not ts.processing_on and LOG_PDB: import pdb pdb.set_trace() key = ts.key self.remove_key_from_stealable(ts) logger.debug( "Request move %s, %s: %2f -> %s: %2f", key, victim, victim.occupancy, thief, thief.occupancy, ) victim_duration = victim.processing[ts] thief_duration = self.scheduler.get_task_duration( ts ) + self.scheduler.get_comm_cost(ts, thief) self.scheduler.stream_comms[victim.address].send( {"op": "steal-request", "key": key} ) self.in_flight[ts] = { "victim": victim, "thief": thief, "victim_duration": victim_duration, "thief_duration": thief_duration, } self.in_flight_occupancy[victim] -= victim_duration self.in_flight_occupancy[thief] += thief_duration except CommClosedError: logger.info("Worker comm %r closed while stealing: %r", victim, ts) except Exception as e: logger.exception(e) if LOG_PDB: 
import pdb pdb.set_trace() raise async def move_task_confirm(self, key=None, worker=None, state=None): try: try: ts = self.scheduler.tasks[key] except KeyError: logger.debug("Key released between request and confirm: %s", key) return try: d = self.in_flight.pop(ts) except KeyError: return thief = d["thief"] victim = d["victim"] logger.debug( "Confirm move %s, %s -> %s. State: %s", key, victim, thief, state ) self.in_flight_occupancy[thief] -= d["thief_duration"] self.in_flight_occupancy[victim] += d["victim_duration"] if not self.in_flight: self.in_flight_occupancy = defaultdict(lambda: 0) if ts.state != "processing" or ts.processing_on is not victim: old_thief = thief.occupancy new_thief = sum(thief.processing.values()) old_victim = victim.occupancy new_victim = sum(victim.processing.values()) thief.occupancy = new_thief victim.occupancy = new_victim self.scheduler.total_occupancy += ( new_thief - old_thief + new_victim - old_victim ) return if ( thief.address not in self.scheduler.workers or victim.address not in self.scheduler.workers ): self.scheduler.reschedule(key) return if state in ("memory", "executing", "long-running", None): self.log(("already-computing", key, victim.address, thief.address)) self.scheduler.check_idle_saturated(thief) self.scheduler.check_idle_saturated(victim) elif state in ("waiting", "ready", "constrained"): self.remove_key_from_stealable(ts) ts.processing_on = thief duration = victim.processing.pop(ts) victim.occupancy -= duration self.scheduler.total_occupancy -= duration if not victim.processing: self.scheduler.total_occupancy -= victim.occupancy victim.occupancy = 0 thief.processing[ts] = d["thief_duration"] thief.occupancy += d["thief_duration"] self.scheduler.total_occupancy += d["thief_duration"] self.put_key_in_stealable(ts) try: self.scheduler.send_task_to_worker(thief.address, ts) except CommClosedError: await self.scheduler.remove_worker(thief.address) self.log(("confirm", key, victim.address, thief.address)) else: raise 
ValueError("Unexpected task state: %s" % state) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise finally: try: self.scheduler.check_idle_saturated(thief) except Exception: pass try: self.scheduler.check_idle_saturated(victim) except Exception: pass def balance(self): s = self.scheduler def combined_occupancy(ws): return ws.occupancy + self.in_flight_occupancy[ws] def maybe_move_task(level, ts, sat, idl, duration, cost_multiplier): occ_idl = combined_occupancy(idl) occ_sat = combined_occupancy(sat) if occ_idl + cost_multiplier * duration <= occ_sat - duration / 2: self.move_task_request(ts, sat, idl) log.append( ( start, level, ts.key, duration, sat.address, occ_sat, idl.address, occ_idl, ) ) s.check_idle_saturated(sat, occ=occ_sat) s.check_idle_saturated(idl, occ=occ_idl) with log_errors(): i = 0 idle = s.idle.values() saturated = s.saturated if not idle or len(idle) == len(s.workers): return log = [] start = time() if not s.saturated: saturated = topk(10, s.workers.values(), key=combined_occupancy) saturated = [ ws for ws in saturated if combined_occupancy(ws) > 0.2 and len(ws.processing) > ws.nthreads ] elif len(s.saturated) < 20: saturated = sorted(saturated, key=combined_occupancy, reverse=True) if len(idle) < 20: idle = sorted(idle, key=combined_occupancy) for level, cost_multiplier in enumerate(self.cost_multipliers): if not idle: break for sat in list(saturated): stealable = self.stealable[sat.address][level] if not stealable or not idle: continue for ts in list(stealable): if ts not in self.key_stealable or ts.processing_on is not sat: stealable.discard(ts) continue i += 1 if not idle: break if _has_restrictions(ts): thieves = [ws for ws in idle if _can_steal(ws, ts, sat)] else: thieves = idle if not thieves: break thief = thieves[i % len(thieves)] duration = sat.processing.get(ts) if duration is None: stealable.discard(ts) continue maybe_move_task( level, ts, sat, thief, duration, cost_multiplier ) if 
self.cost_multipliers[level] < 20: stealable = self.stealable_all[level] for ts in list(stealable): if not idle: break if ts not in self.key_stealable: stealable.discard(ts) continue sat = ts.processing_on if sat is None: stealable.discard(ts) continue if combined_occupancy(sat) < 0.2: continue if len(sat.processing) <= sat.nthreads: continue i += 1 if _has_restrictions(ts): thieves = [ws for ws in idle if _can_steal(ws, ts, sat)] else: thieves = idle if not thieves: continue thief = thieves[i % len(thieves)] duration = sat.processing[ts] maybe_move_task( level, ts, sat, thief, duration, cost_multiplier ) if log: self.log(log) self.count += 1 stop = time() if s.digests: s.digests["steal-duration"].add(stop - start) def restart(self, scheduler): for stealable in self.stealable.values(): for s in stealable: s.clear() for s in self.stealable_all: s.clear() self.key_stealable.clear() def story(self, *keys): keys = set(keys) out = [] for _, L in self.scheduler.get_event("stealing"): if not isinstance(L, list): L = [L] for t in L: if any(x in keys for x in t): out.append(t) return out def _has_restrictions(ts): return not ts.loose_restrictions and ( ts.host_restrictions or ts.worker_restrictions or ts.resource_restrictions ) def _can_steal(thief, ts, victim): if ( ts.host_restrictions and get_address_host(thief.address) not in ts.host_restrictions ): return False elif ts.worker_restrictions and thief.address not in ts.worker_restrictions: return False if victim.resources is None: return True for resource, value in victim.resources.items(): try: supplied = thief.resources[resource] except KeyError: return False else: if supplied < value: return False return True fast_tasks = {"split-shuffle"}
true
true
1c45e29f710037c91f2e554142171caeedf3bf05
4,923
py
Python
src/lidar.py
steviet91/furmulaone_source
ca738b271fa346da4234c5ffc781abc12a5ac49f
[ "MIT" ]
null
null
null
src/lidar.py
steviet91/furmulaone_source
ca738b271fa346da4234c5ffc781abc12a5ac49f
[ "MIT" ]
24
2020-04-14T12:38:07.000Z
2020-04-29T08:18:33.000Z
src/lidar.py
steviet91/furmulaone_source
ca738b271fa346da4234c5ffc781abc12a5ac49f
[ "MIT" ]
null
null
null
import numpy as np from .track import TrackHandler from .geom import Line from .geom import Circle from .geom import get_intersection_point_lineseg_lineseg from .geom import calc_euclid_distance_2d from .geom import rotate_point import time #from multiprocessing import Pool class Lidar(object): """ Object for the LIDAR containing the rays and intersect data """ _NRays = 3 _xLidarRange = float(200) def __init__(self, track: TrackHandler, a0: float, x0: float, y0: float, aFov: float): """ Initialise the LIDAR object """ # save the arguments self.track = track self.a0 = a0 # the initial nominal angle of the lidar (the centre ray) self.x0 = x0 # the position of the lidar self.y0 = y0 # the y position of the lidar self.aFov = aFov # the field of view of the lidar # initialise the lidar rays self.initialise_rays() # initialise the lidar collision circle self.collisionCircle = Circle(self.x0, self.y0, Lidar._xLidarRange) # intialise the collision array self.initialise_collision_array() # initialise the mp pool #self.pool = Pool(4) def initialise_rays(self): """ Set up the rays that represent the lidar """ self.rays = [] for i in range(0,Lidar._NRays): a = self.a0 - self.aFov / 2 + i * self.aFov / (Lidar._NRays - 1) # angle of this ray # going to work as a unit vector x = np.cos(a) y = np.sin(a) # instantiate the ray as a line self.rays.append(Line((self.x0, self.y0), (self.x0 + x, self.y0 + y))) def initialise_collision_array(self): """ Collision array contains the distance of the collision for each ray. 
A negative number (-1) infers no collision found """ self.collision_array = -1.0 * np.ones(Lidar._NRays, dtype=np.float64) def rotate_lidar_by_delta(self, daRot: float, cX: float, cY: float): """ Rotate the lidars about a pivot point """ for r in self.rays: r.rotate_line_by_delta(daRot, cX, cY) pNew = rotate_point(cX, cY, daRot, np.array([self.x0, self.y0])) self.x0 = pNew[0] self.y0 = pNew[1] def translate_lidars_by_delta(self, dX: float, dY: float): """ Translate the lidars by the given deltas """ for r in self.rays: r.translate_line_by_delta(dX, dY) self.collisionCircle.update_centre_by_delta(dX, dY) self.x0 += dX self.y0 += dY def reset_lidar(self): """ Reset the lidars to their previous position/angles """ for r in self.rays: r.reset_line() self.collisionCircle.update_centre_to_new_pos(self.x0, self.y0) self.initialise_collision_array() def fire_lidar(self): """ Determine the distance to the nearest """ # find the indexes of the track segments to check collision for in_idxs, out_idxs = self.track.get_line_idxs_for_collision(self.collisionCircle) # get the objects of the lines that should be checked for collision check_lines = [] if len(in_idxs) > 0: check_lines.extend([self.track.data.in_lines[i] for i in in_idxs]) if len(out_idxs) > 0: check_lines.extend([self.track.data.out_lines[i] for i in out_idxs]) # calculate the collision array self.collision_array = np.array([self.check_rays(r, check_lines) for r in self.rays]) def check_rays(self, r: Line, check_lines: list): """ Calculate the minimum distance to each of the provided lines """ ds = np.array([self.cast_ray(r, l) for l in check_lines]) # if the ray scored a collision then find the minimum distance (-1 is no collision) if np.max(ds) > 0: # get the minimun distance return np.min(ds[np.where(ds > 0)[0]]) else: return float(-1) def cast_ray(self, r: Line, l: Line): """ Cast the ray r and return the distance to the line l if less than lidar range """ pInt = get_intersection_point_lineseg_lineseg(l, r, 
l2_is_ray=True) if pInt is None: return float(-1) else: d = float(calc_euclid_distance_2d(tuple(r.p1), tuple(pInt))) if d <= Lidar._xLidarRange: return d else: # collisiion is out of range return float(-1) if __name__ == "__main__": import time t = TrackHandler('octo_track') l = Lidar(t, 0, 0, 0, 20) n = 1000 t = time.time() for i in range(0,n): l.fire_lidar() print((time.time()-t) * 1000 / n,'ms')
33.719178
97
0.589681
import numpy as np from .track import TrackHandler from .geom import Line from .geom import Circle from .geom import get_intersection_point_lineseg_lineseg from .geom import calc_euclid_distance_2d from .geom import rotate_point import time class Lidar(object): _NRays = 3 _xLidarRange = float(200) def __init__(self, track: TrackHandler, a0: float, x0: float, y0: float, aFov: float): self.track = track self.a0 = a0 self.x0 = x0 self.y0 = y0 self.aFov = aFov self.initialise_rays() self.collisionCircle = Circle(self.x0, self.y0, Lidar._xLidarRange) self.initialise_collision_array() def initialise_rays(self): self.rays = [] for i in range(0,Lidar._NRays): a = self.a0 - self.aFov / 2 + i * self.aFov / (Lidar._NRays - 1) x = np.cos(a) y = np.sin(a) self.rays.append(Line((self.x0, self.y0), (self.x0 + x, self.y0 + y))) def initialise_collision_array(self): self.collision_array = -1.0 * np.ones(Lidar._NRays, dtype=np.float64) def rotate_lidar_by_delta(self, daRot: float, cX: float, cY: float): for r in self.rays: r.rotate_line_by_delta(daRot, cX, cY) pNew = rotate_point(cX, cY, daRot, np.array([self.x0, self.y0])) self.x0 = pNew[0] self.y0 = pNew[1] def translate_lidars_by_delta(self, dX: float, dY: float): for r in self.rays: r.translate_line_by_delta(dX, dY) self.collisionCircle.update_centre_by_delta(dX, dY) self.x0 += dX self.y0 += dY def reset_lidar(self): for r in self.rays: r.reset_line() self.collisionCircle.update_centre_to_new_pos(self.x0, self.y0) self.initialise_collision_array() def fire_lidar(self): in_idxs, out_idxs = self.track.get_line_idxs_for_collision(self.collisionCircle) check_lines = [] if len(in_idxs) > 0: check_lines.extend([self.track.data.in_lines[i] for i in in_idxs]) if len(out_idxs) > 0: check_lines.extend([self.track.data.out_lines[i] for i in out_idxs]) self.collision_array = np.array([self.check_rays(r, check_lines) for r in self.rays]) def check_rays(self, r: Line, check_lines: list): ds = np.array([self.cast_ray(r, l) for l in 
check_lines]) if np.max(ds) > 0: return np.min(ds[np.where(ds > 0)[0]]) else: return float(-1) def cast_ray(self, r: Line, l: Line): pInt = get_intersection_point_lineseg_lineseg(l, r, l2_is_ray=True) if pInt is None: return float(-1) else: d = float(calc_euclid_distance_2d(tuple(r.p1), tuple(pInt))) if d <= Lidar._xLidarRange: return d else: return float(-1) if __name__ == "__main__": import time t = TrackHandler('octo_track') l = Lidar(t, 0, 0, 0, 20) n = 1000 t = time.time() for i in range(0,n): l.fire_lidar() print((time.time()-t) * 1000 / n,'ms')
true
true
1c45e2b8f9e20b2f3b7d0e052c8fd311816a6a16
1,984
py
Python
carball/analysis2/stats/demo_stats.py
twobackfromtheend/carball
6dcc3f7f0f2266cc3e0a3de24deaac2aec392b73
[ "Apache-2.0" ]
null
null
null
carball/analysis2/stats/demo_stats.py
twobackfromtheend/carball
6dcc3f7f0f2266cc3e0a3de24deaac2aec392b73
[ "Apache-2.0" ]
null
null
null
carball/analysis2/stats/demo_stats.py
twobackfromtheend/carball
6dcc3f7f0f2266cc3e0a3de24deaac2aec392b73
[ "Apache-2.0" ]
null
null
null
from collections import Counter from typing import Dict, List import numpy as np import pandas as pd from api.analysis.stats_pb2 import PlayerStats from api.events.demo_pb2 import Demo from api.game.game_pb2 import Game from carball.analysis2.constants.constants import FIELD_Y_LIM, FIELD_X_LIM def set_demo_stats(player_stats: Dict[str, PlayerStats], game: Game, demos: List[Demo], player_blue_data_frames: Dict[str, pd.DataFrame]): player_id_to_name: Dict[str, str] = {player.id.id: player.name for player in game.players} demo_counts = Counter() demoed_counts = Counter() demos_near_opponent_goal_counts = Counter() demoed_near_own_goal_counts = Counter() active_frames = list(player_blue_data_frames.values())[0].index for demo in demos: frame_number = demo.frame_number if frame_number not in active_frames: continue attacker_id = demo.attacker_id.id victim_id = demo.victim_id.id demo_counts[attacker_id] += 1 demoed_counts[victim_id] += 1 victim_blue_df = player_blue_data_frames[victim_id] victim_name = player_id_to_name[victim_id] victim_position_at_demo = victim_blue_df.loc[frame_number - 1, (victim_name, ['pos_x', 'pos_y'])].values BLUE_GOAL_POSITION = np.array([0, -FIELD_Y_LIM]) victim_distance_from_goal = ((victim_position_at_demo - BLUE_GOAL_POSITION) ** 2).sum() ** 0.5 if victim_distance_from_goal < FIELD_X_LIM / 2: demos_near_opponent_goal_counts[attacker_id] += 1 demoed_near_own_goal_counts[victim_id] += 1 for player_id, _player_stats in player_stats.items(): _player_stats.demos = demo_counts[player_id] _player_stats.demoed = demoed_counts[player_id] _player_stats.demos_near_opponent_goal = demos_near_opponent_goal_counts[player_id] _player_stats.demoed_near_own_goal = demoed_near_own_goal_counts[player_id]
39.68
112
0.720766
from collections import Counter from typing import Dict, List import numpy as np import pandas as pd from api.analysis.stats_pb2 import PlayerStats from api.events.demo_pb2 import Demo from api.game.game_pb2 import Game from carball.analysis2.constants.constants import FIELD_Y_LIM, FIELD_X_LIM def set_demo_stats(player_stats: Dict[str, PlayerStats], game: Game, demos: List[Demo], player_blue_data_frames: Dict[str, pd.DataFrame]): player_id_to_name: Dict[str, str] = {player.id.id: player.name for player in game.players} demo_counts = Counter() demoed_counts = Counter() demos_near_opponent_goal_counts = Counter() demoed_near_own_goal_counts = Counter() active_frames = list(player_blue_data_frames.values())[0].index for demo in demos: frame_number = demo.frame_number if frame_number not in active_frames: continue attacker_id = demo.attacker_id.id victim_id = demo.victim_id.id demo_counts[attacker_id] += 1 demoed_counts[victim_id] += 1 victim_blue_df = player_blue_data_frames[victim_id] victim_name = player_id_to_name[victim_id] victim_position_at_demo = victim_blue_df.loc[frame_number - 1, (victim_name, ['pos_x', 'pos_y'])].values BLUE_GOAL_POSITION = np.array([0, -FIELD_Y_LIM]) victim_distance_from_goal = ((victim_position_at_demo - BLUE_GOAL_POSITION) ** 2).sum() ** 0.5 if victim_distance_from_goal < FIELD_X_LIM / 2: demos_near_opponent_goal_counts[attacker_id] += 1 demoed_near_own_goal_counts[victim_id] += 1 for player_id, _player_stats in player_stats.items(): _player_stats.demos = demo_counts[player_id] _player_stats.demoed = demoed_counts[player_id] _player_stats.demos_near_opponent_goal = demos_near_opponent_goal_counts[player_id] _player_stats.demoed_near_own_goal = demoed_near_own_goal_counts[player_id]
true
true
1c45e33ca52708a7eea05a4c0e1c5e724e56f6ba
3,556
py
Python
deploytesting/settings.py
mouaadBenAllal/deploytest
1011152d3a00879f450db3deaef64dee7c9009c0
[ "Apache-2.0" ]
null
null
null
deploytesting/settings.py
mouaadBenAllal/deploytest
1011152d3a00879f450db3deaef64dee7c9009c0
[ "Apache-2.0" ]
null
null
null
deploytesting/settings.py
mouaadBenAllal/deploytest
1011152d3a00879f450db3deaef64dee7c9009c0
[ "Apache-2.0" ]
null
null
null
""" Django settings for deploytesting project. Generated by 'django-admin startproject' using Django 1.10.5. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '!5+^n3g458fs0#x8=142wf+5xqkw2)nb5^_zkry-g2fl&8@rn_') # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECRET_KEY = '!5+^n3g458fs0#x8=142wf+5xqkw2)nb5^_zkry-g2fl&8@rn_' # SECURITY WARNING: don't run with debug turned on in production! # DEBUG = True DEBUG = bool(os.environ.get('DJANGO_DEBUG', True)) ALLOWED_HOSTS = ['Mouaadben.pythonanywhere.com'] # Application definition INSTALLED_APPS = [ 'deploytesting', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'deploytesting.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 
'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'deploytesting.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': '/home/Mouaadben/deploytest.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ # The absolute path to the directory where collectstatic will collect static files for deployment. STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # The URL to use when referring to static files (where they will be served from) STATIC_URL = '/static/'
28.448
102
0.708661
import os SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '!5+^n3g458fs0#x8=142wf+5xqkw2)nb5^_zkry-g2fl&8@rn_') BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # DEBUG = True DEBUG = bool(os.environ.get('DJANGO_DEBUG', True)) ALLOWED_HOSTS = ['Mouaadben.pythonanywhere.com'] # Application definition INSTALLED_APPS = [ 'deploytesting', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'deploytesting.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'deploytesting.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': '/home/Mouaadben/deploytest.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] 
# Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ # The absolute path to the directory where collectstatic will collect static files for deployment. STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # The URL to use when referring to static files (where they will be served from) STATIC_URL = '/static/'
true
true
1c45e454061369b7ef00827dbb014ceacea5b029
212
py
Python
lambdata_andrewarnett/__init__.py
AndrewArnett/lambdata
fe7e2694a0a099f9df88807f744556c230e9f18d
[ "MIT" ]
null
null
null
lambdata_andrewarnett/__init__.py
AndrewArnett/lambdata
fe7e2694a0a099f9df88807f744556c230e9f18d
[ "MIT" ]
null
null
null
lambdata_andrewarnett/__init__.py
AndrewArnett/lambdata
fe7e2694a0a099f9df88807f744556c230e9f18d
[ "MIT" ]
1
2020-08-04T19:20:50.000Z
2020-08-04T19:20:50.000Z
""" lambdata - a collection of Data Science helper functions """ import pandas as pd import numpy as np from lambdata_andrewarnett.dataframe_helper import shape_head, baseline TEST = pd.DataFrame(np.ones(10))
19.272727
71
0.783019
import pandas as pd import numpy as np from lambdata_andrewarnett.dataframe_helper import shape_head, baseline TEST = pd.DataFrame(np.ones(10))
true
true
1c45e47eae4c91731680b35fa0da9f2d7d523680
10,928
py
Python
mindmeld/components/_elasticsearch_helpers.py
jre21/mindmeld
6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81
[ "Apache-2.0" ]
1
2021-01-06T23:39:57.000Z
2021-01-06T23:39:57.000Z
mindmeld/components/_elasticsearch_helpers.py
jre21/mindmeld
6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81
[ "Apache-2.0" ]
1
2021-02-02T22:53:01.000Z
2021-02-02T22:53:01.000Z
mindmeld/components/_elasticsearch_helpers.py
jre21/mindmeld
6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module contains helper methods for consuming Elasticsearch.""" import os import logging from elasticsearch5 import (Elasticsearch, ImproperlyConfigured, ElasticsearchException, ConnectionError as EsConnectionError, TransportError) from elasticsearch5.helpers import streaming_bulk from tqdm import tqdm from ._config import DEFAULT_ES_INDEX_TEMPLATE, DEFAULT_ES_INDEX_TEMPLATE_NAME from ..exceptions import KnowledgeBaseConnectionError, KnowledgeBaseError logger = logging.getLogger(__name__) INDEX_TYPE_SYNONYM = 'syn' INDEX_TYPE_KB = 'kb' def get_scoped_index_name(app_namespace, index_name): return '{}${}'.format(app_namespace, index_name) def create_es_client(es_host=None, es_user=None, es_pass=None): """Creates a new Elasticsearch client Args: es_host (str): The Elasticsearch host server es_user (str): The Elasticsearch username for http auth es_pass (str): The Elasticsearch password for http auth """ es_host = es_host or os.environ.get('MM_ES_HOST') es_user = es_user or os.environ.get('MM_ES_USERNAME') es_pass = es_pass or os.environ.get('MM_ES_PASSWORD') try: http_auth = (es_user, es_pass) if es_user and es_pass else None es_client = Elasticsearch(es_host, http_auth=http_auth) return es_client except ElasticsearchException: raise KnowledgeBaseError except ImproperlyConfigured: raise KnowledgeBaseError def 
does_index_exist(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2): """Return boolean flag to indicate whether the specified index exists.""" es_client = es_client or create_es_client(es_host) scoped_index_name = get_scoped_index_name(app_namespace, index_name) try: # Confirm ES connection with a shorter timeout es_client.cluster.health(request_timeout=connect_timeout) return es_client.indices.exists(index=scoped_index_name) except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError except ElasticsearchException: raise KnowledgeBaseError def get_field_names(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2): """Return a list of field names available in the specified index.""" es_client = es_client or create_es_client(es_host) scoped_index_name = get_scoped_index_name(app_namespace, index_name) try: if not does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout): raise ValueError('Elasticsearch index \'{}\' does not exist.'.format(index_name)) res = es_client.indices.get(index=scoped_index_name) all_field_info = res[scoped_index_name]['mappings']['document']['properties'] return all_field_info.keys() except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError except ElasticsearchException: raise KnowledgeBaseError def create_index(app_namespace, 
index_name, mapping, es_host=None, es_client=None, connect_timeout=2): """Creates a new index. Args: app_namespace (str): The namespace of the app index_name (str): The name of the new index to be created mapping (str): The Elasticsearch index mapping to use es_host (str): The Elasticsearch host server es_client: The Elasticsearch client connect_timeout (int, optional): The amount of time for a connection to the Elasticsearch host """ es_client = es_client or create_es_client(es_host) scoped_index_name = get_scoped_index_name(app_namespace, index_name) try: if not does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout): # checks the existence of default index template, if not then creates it. if not es_client.indices.exists_template(name=DEFAULT_ES_INDEX_TEMPLATE_NAME): es_client.indices.put_template(name=DEFAULT_ES_INDEX_TEMPLATE_NAME, body=DEFAULT_ES_INDEX_TEMPLATE) logger.info('Creating index %r', index_name) es_client.indices.create(scoped_index_name, body=mapping) else: logger.error('Index %r already exists.', index_name) except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError('Unexpected error occurred when sending requests to ' 'Elasticsearch: {} Status code: {} details: ' '{}'.format(e.error, e.status_code, e.info)) except ElasticsearchException: raise KnowledgeBaseError def delete_index(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2): """Deletes an index. 
Args: app_namespace (str): The namespace of the app index_name (str): The name of the index to be deleted es_host (str): The Elasticsearch host server es_client: The Elasticsearch client connect_timeout (int, optional): The amount of time for a connection to the Elasticsearch host """ es_client = es_client or create_es_client(es_host) scoped_index_name = get_scoped_index_name(app_namespace, index_name) try: if does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout): logger.info('Deleting index %r', index_name) es_client.indices.delete(scoped_index_name) else: raise ValueError('Elasticsearch index \'{}\' for application \'{}\' does not exist.' .format(index_name, app_namespace)) except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError except ElasticsearchException: raise KnowledgeBaseError def load_index(app_namespace, index_name, docs, docs_count, mapping, doc_type, es_host=None, es_client=None, connect_timeout=2): """Loads documents from data into the specified index. If an index with the specified name doesn't exist, a new index with that name will be created. 
Args: app_namespace (str): The namespace of the app index_name (str): The name of the new index to be created docs (iterable): An iterable which contains a collection of documents in the correct format which should be imported into the index docs_count (int): The number of documents in doc mapping (str): The Elasticsearch index mapping to use doc_type (str): The document type es_host (str): The Elasticsearch host server es_client (Elasticsearch): The Elasticsearch client connect_timeout (int, optional): The amount of time for a connection to the Elasticsearch host """ scoped_index_name = get_scoped_index_name(app_namespace, index_name) es_client = es_client or create_es_client(es_host) try: # create index if specified index does not exist if does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout): logger.warning('Elasticsearch index \'%s\' for application \'%s\' already exists!', index_name, app_namespace) logger.info('Loading index %r', index_name) else: create_index(app_namespace, index_name, mapping, es_host=es_host, es_client=es_client) count = 0 # create the progess bar with docs count pbar = tqdm(total=docs_count) for okay, result in streaming_bulk(es_client, docs, index=scoped_index_name, doc_type=doc_type, chunk_size=50, raise_on_error=False): action, result = result.popitem() doc_id = '/%s/%s/%s' % (index_name, doc_type, result['_id']) # process the information from ES whether the document has been # successfully indexed if not okay: logger.error('Failed to %s document %s: %r', action, doc_id, result) else: count += 1 pbar.update(1) # close the progress bar and flush all output pbar.close() logger.info('Loaded %s document%s', count, '' if count == 1 else 's') except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to 
Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError except ElasticsearchException: raise KnowledgeBaseError
46.901288
99
0.685212
import os import logging from elasticsearch5 import (Elasticsearch, ImproperlyConfigured, ElasticsearchException, ConnectionError as EsConnectionError, TransportError) from elasticsearch5.helpers import streaming_bulk from tqdm import tqdm from ._config import DEFAULT_ES_INDEX_TEMPLATE, DEFAULT_ES_INDEX_TEMPLATE_NAME from ..exceptions import KnowledgeBaseConnectionError, KnowledgeBaseError logger = logging.getLogger(__name__) INDEX_TYPE_SYNONYM = 'syn' INDEX_TYPE_KB = 'kb' def get_scoped_index_name(app_namespace, index_name): return '{}${}'.format(app_namespace, index_name) def create_es_client(es_host=None, es_user=None, es_pass=None): es_host = es_host or os.environ.get('MM_ES_HOST') es_user = es_user or os.environ.get('MM_ES_USERNAME') es_pass = es_pass or os.environ.get('MM_ES_PASSWORD') try: http_auth = (es_user, es_pass) if es_user and es_pass else None es_client = Elasticsearch(es_host, http_auth=http_auth) return es_client except ElasticsearchException: raise KnowledgeBaseError except ImproperlyConfigured: raise KnowledgeBaseError def does_index_exist(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2): es_client = es_client or create_es_client(es_host) scoped_index_name = get_scoped_index_name(app_namespace, index_name) try: es_client.cluster.health(request_timeout=connect_timeout) return es_client.indices.exists(index=scoped_index_name) except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError except ElasticsearchException: raise KnowledgeBaseError def get_field_names(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2): es_client = es_client or create_es_client(es_host) 
scoped_index_name = get_scoped_index_name(app_namespace, index_name) try: if not does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout): raise ValueError('Elasticsearch index \'{}\' does not exist.'.format(index_name)) res = es_client.indices.get(index=scoped_index_name) all_field_info = res[scoped_index_name]['mappings']['document']['properties'] return all_field_info.keys() except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError except ElasticsearchException: raise KnowledgeBaseError def create_index(app_namespace, index_name, mapping, es_host=None, es_client=None, connect_timeout=2): es_client = es_client or create_es_client(es_host) scoped_index_name = get_scoped_index_name(app_namespace, index_name) try: if not does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout): if not es_client.indices.exists_template(name=DEFAULT_ES_INDEX_TEMPLATE_NAME): es_client.indices.put_template(name=DEFAULT_ES_INDEX_TEMPLATE_NAME, body=DEFAULT_ES_INDEX_TEMPLATE) logger.info('Creating index %r', index_name) es_client.indices.create(scoped_index_name, body=mapping) else: logger.error('Index %r already exists.', index_name) except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError('Unexpected error occurred when sending requests to ' 'Elasticsearch: {} Status code: {} details: ' 
'{}'.format(e.error, e.status_code, e.info)) except ElasticsearchException: raise KnowledgeBaseError def delete_index(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2): es_client = es_client or create_es_client(es_host) scoped_index_name = get_scoped_index_name(app_namespace, index_name) try: if does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout): logger.info('Deleting index %r', index_name) es_client.indices.delete(scoped_index_name) else: raise ValueError('Elasticsearch index \'{}\' for application \'{}\' does not exist.' .format(index_name, app_namespace)) except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError except ElasticsearchException: raise KnowledgeBaseError def load_index(app_namespace, index_name, docs, docs_count, mapping, doc_type, es_host=None, es_client=None, connect_timeout=2): scoped_index_name = get_scoped_index_name(app_namespace, index_name) es_client = es_client or create_es_client(es_host) try: if does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout): logger.warning('Elasticsearch index \'%s\' for application \'%s\' already exists!', index_name, app_namespace) logger.info('Loading index %r', index_name) else: create_index(app_namespace, index_name, mapping, es_host=es_host, es_client=es_client) count = 0 pbar = tqdm(total=docs_count) for okay, result in streaming_bulk(es_client, docs, index=scoped_index_name, doc_type=doc_type, chunk_size=50, raise_on_error=False): action, result = result.popitem() doc_id = '/%s/%s/%s' % (index_name, doc_type, result['_id']) if not okay: logger.error('Failed to %s document %s: %r', action, doc_id, 
result) else: count += 1 pbar.update(1) pbar.close() logger.info('Loaded %s document%s', count, '' if count == 1 else 's') except EsConnectionError as e: logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info) raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts) except TransportError as e: logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s ' 'Status code: %s details: %s', e.error, e.status_code, e.info) raise KnowledgeBaseError except ElasticsearchException: raise KnowledgeBaseError
true
true
1c45e48a779bc28e0a8409233594b33b5c303ad5
376
py
Python
test/test_ynab.py
quinnhosler/ynab-sdk-python
4ef8040bb44216212a84c8990329dcf63972e0fa
[ "Apache-2.0" ]
null
null
null
test/test_ynab.py
quinnhosler/ynab-sdk-python
4ef8040bb44216212a84c8990329dcf63972e0fa
[ "Apache-2.0" ]
null
null
null
test/test_ynab.py
quinnhosler/ynab-sdk-python
4ef8040bb44216212a84c8990329dcf63972e0fa
[ "Apache-2.0" ]
null
null
null
from unittest import TestCase from test.support.dummy_client import DummyClient from ynab_sdk import YNAB class YNABTest(TestCase): ynab: YNAB client: DummyClient def setUp(self): self.client = DummyClient() self.ynab = YNAB(client=self.client) def test_client_requires_key_or_client(self): self.assertRaises(AssertionError, YNAB)
22.117647
49
0.728723
from unittest import TestCase from test.support.dummy_client import DummyClient from ynab_sdk import YNAB class YNABTest(TestCase): ynab: YNAB client: DummyClient def setUp(self): self.client = DummyClient() self.ynab = YNAB(client=self.client) def test_client_requires_key_or_client(self): self.assertRaises(AssertionError, YNAB)
true
true
1c45e570f903cc0c5df4101b24907713afacfe1b
1,034
py
Python
test/app/all_tests.py
chuyqa/pydoop
575f56cc66381fef08981a2452acde02bddf0363
[ "Apache-2.0" ]
1
2021-03-22T02:22:30.000Z
2021-03-22T02:22:30.000Z
test/app/all_tests.py
chuyqa/pydoop
575f56cc66381fef08981a2452acde02bddf0363
[ "Apache-2.0" ]
null
null
null
test/app/all_tests.py
chuyqa/pydoop
575f56cc66381fef08981a2452acde02bddf0363
[ "Apache-2.0" ]
null
null
null
# BEGIN_COPYRIGHT # # Copyright 2009-2018 CRS4. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # END_COPYRIGHT import unittest from pydoop.test_utils import get_module TEST_MODULE_NAMES = [ 'test_submit', ] def suite(path=None): suites = [] for module in TEST_MODULE_NAMES: suites.append(get_module(module, path).suite()) return unittest.TestSuite(suites) if __name__ == '__main__': import sys _RESULT = unittest.TextTestRunner(verbosity=2).run(suite()) sys.exit(not _RESULT.wasSuccessful())
26.512821
77
0.737911
import unittest from pydoop.test_utils import get_module TEST_MODULE_NAMES = [ 'test_submit', ] def suite(path=None): suites = [] for module in TEST_MODULE_NAMES: suites.append(get_module(module, path).suite()) return unittest.TestSuite(suites) if __name__ == '__main__': import sys _RESULT = unittest.TextTestRunner(verbosity=2).run(suite()) sys.exit(not _RESULT.wasSuccessful())
true
true
1c45e61545b2b62f4211d66f7c74da507f7af9e4
15,352
py
Python
tests/unit/test_hooks.py
i386x/tox-lsr
22f4d63d58050b1c1bee2e91eb239c31f35cfd13
[ "MIT" ]
null
null
null
tests/unit/test_hooks.py
i386x/tox-lsr
22f4d63d58050b1c1bee2e91eb239c31f35cfd13
[ "MIT" ]
null
null
null
tests/unit/test_hooks.py
i386x/tox-lsr
22f4d63d58050b1c1bee2e91eb239c31f35cfd13
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # SPDX-License-Identifier: MIT # """Tests for tox_lsr hooks.""" import os import shutil import tempfile try: from unittest import mock as unittest_mock from unittest.mock import MagicMock, Mock, patch except ImportError: import mock as unittest_mock from mock import MagicMock, Mock, patch from copy import deepcopy import pkg_resources # I have no idea why pylint complains about this. This works: # command = python -c 'import py; print(dir(py.iniconfig))' # bug in pylint? anyway, just ignore it # in addition - pylint does not allow me to disable it # on the same line, so I have to disable it before the line # pylint: disable=no-member,no-name-in-module,import-error import py.iniconfig import unittest2 from tox_lsr.hooks import ( CONFIG_FILES_SUBDIR, LSR_CONFIG_SECTION, LSR_ENABLE, LSR_ENABLE_ENV, SCRIPT_NAME, TOX_DEFAULT_INI, _LSRPath, is_lsr_enabled, merge_config, merge_envconf, merge_ini, merge_prop_values, prop_is_set, set_prop_values_ini, tox_addoption, tox_configure, ) from .utils import MockConfig # code uses some protected members such as _cfg, _parser, _reader # pylint: disable=protected-access class HooksTestCase(unittest2.TestCase): def setUp(self): self.toxworkdir = tempfile.mkdtemp() patch( "pkg_resources.resource_filename", return_value=self.toxworkdir + "/" + SCRIPT_NAME, ).start() self.default_tox_ini_b = pkg_resources.resource_string( "tox_lsr", CONFIG_FILES_SUBDIR + "/" + TOX_DEFAULT_INI ) self.default_tox_ini_raw = self.default_tox_ini_b.decode() # e.g. 
__file__ is tests/unit/something.py - # fixture_path is tests/fixtures self.tests_path = os.path.dirname( os.path.dirname(os.path.abspath(__file__)) ) self.fixture_path = os.path.join( self.tests_path, "fixtures", self.id().split(".")[-1] ) def tearDown(self): shutil.rmtree(self.toxworkdir) patch.stopall() def test_tox_addoption(self): """Test tox_addoption.""" parser = Mock(add_argument=Mock()) tox_addoption(parser) self.assertEqual(1, parser.add_argument.call_count) def test_tox_configure(self): """Test tox_configure.""" config = MockConfig(toxworkdir=self.toxworkdir) with patch( "tox_lsr.hooks.is_lsr_enabled", return_value=False ) as mock_ile: tox_configure(config) self.assertEqual(1, mock_ile.call_count) setattr(config.option, LSR_ENABLE, True) default_config = MockConfig(toxworkdir=self.toxworkdir) with patch( "pkg_resources.resource_string", return_value=self.default_tox_ini_b, ) as mock_rs: with patch("tox_lsr.hooks.merge_config") as mock_mc: with patch( "tox_lsr.hooks.merge_ini", return_value=self.default_tox_ini_raw, ) as mock_mi: with patch( "tox_lsr.hooks.Config", side_effect=[TypeError(), default_config], ) as mock_cfg: with patch( "tox_lsr.hooks.ParseIni", side_effect=[TypeError(), None], ) as mock_pi: tox_configure(config) self.assertEqual(1, mock_rs.call_count) self.assertEqual(2, mock_pi.call_count) self.assertEqual(1, mock_mc.call_count) self.assertEqual(1, mock_mi.call_count) self.assertEqual(2, mock_cfg.call_count) def test_tox_merge_ini(self): """Test that given config is merged with default config ini.""" config = MockConfig(toxworkdir=self.toxworkdir) tox_ini_file = os.path.join(self.fixture_path, "tox.ini") config._cfg = py.iniconfig.IniConfig(tox_ini_file) result = merge_ini(config, self.default_tox_ini_raw) # check the result expected_file = os.path.join(self.fixture_path, "result.ini") expected_ini = py.iniconfig.IniConfig(expected_file) result_ini = py.iniconfig.IniConfig("", result) self.assertDictEqual(expected_ini.sections, 
result_ini.sections) def test_tox_prop_is_set(self): """Test prop_is_set.""" tec = Mock(envname="prop") tec._reader = Mock() tec._reader._cfg = Mock() cfgdict = { "empty_str_prop": "", "str_prop": "str_prop", "int_prop": 0, "bool_prop": False, "float_prop": 0.0, "list_prop": [1, 2, 3], "empty_list_prop": [], "dict_prop": {"a": "a"}, "empty_dict_prop": {}, "obj_prop": object(), "none_prop": None, } tec._reader._cfg.sections = deepcopy({"testenv": cfgdict}) for prop in cfgdict: self.assertTrue(prop_is_set(tec, prop)) tec._reader._cfg.sections["testenv:prop"] = deepcopy(cfgdict) for prop in cfgdict: self.assertTrue(prop_is_set(tec, prop)) del tec._reader._cfg.sections["testenv"] del tec._reader._cfg.sections["testenv:prop"] tec.configure_mock(**deepcopy(cfgdict)) for prop in cfgdict: self.assertFalse(prop_is_set(tec, prop)) def test_tox_merge_prop_values(self): """Test merge_prop_values.""" # assert that code ignores properties it does not handle tec = MagicMock() def_tec = MagicMock() merge_prop_values("nosuchprop", tec, def_tec) self.assertFalse(tec.mock_calls) self.assertFalse(def_tec.mock_calls) # test empty tec tec = MagicMock() def_tec = MagicMock() propnames = ["setenv", "deps", "passenv", "whitelist_externals"] empty_attrs = { "setenv": {}, "deps": [], "passenv": set(), "whitelist_externals": [], } tec.configure_mock(**deepcopy(empty_attrs)) full_attrs = { "setenv": {"a": "a", "b": "b"}, "deps": ["a", "b"], "passenv": set(["a", "b"]), "whitelist_externals": ["a", "b"], } def_tec.configure_mock(**deepcopy(full_attrs)) for prop in propnames: merge_prop_values(prop, tec, def_tec) for prop in propnames: val = getattr(tec, prop) exp_val = full_attrs[prop] if isinstance(val, list): self.assertEqual(set(exp_val), set(val)) else: self.assertEqual(exp_val, val) # test empty def_tec tec = MagicMock() def_tec = MagicMock() tec.configure_mock(**deepcopy(full_attrs)) def_tec.configure_mock(**deepcopy(empty_attrs)) for prop in propnames: merge_prop_values(prop, tec, 
def_tec) for prop in propnames: val = getattr(tec, prop) exp_val = full_attrs[prop] if isinstance(val, list): self.assertEqual(set(exp_val), set(val)) else: self.assertEqual(exp_val, val) # test merging more_attrs = { "setenv": {"a": "a", "c": "c"}, "deps": ["a", "c"], "passenv": set(["a", "c"]), "whitelist_externals": ["a", "c"], } result_attrs = { "setenv": {"a": "a", "b": "b", "c": "c"}, "deps": ["a", "b", "c"], "passenv": set(["a", "b", "c"]), "whitelist_externals": ["a", "b", "c"], } tec = MagicMock() def_tec = MagicMock() tec.configure_mock(**deepcopy(full_attrs)) def_tec.configure_mock(**deepcopy(more_attrs)) for prop in propnames: merge_prop_values(prop, tec, def_tec) for prop in propnames: val = getattr(tec, prop) exp_val = result_attrs[prop] if isinstance(val, list): self.assertEqual(set(exp_val), set(val)) else: self.assertEqual(exp_val, val) def test_tox_merge_envconf(self): """Test the merge_envconf method.""" # test exception handling prop = "unsettable" def mock_unsettable_is_set(envconf, propname): if propname != prop: return False if envconf == def_tec: return True return False def_tec = Mock(unsettable="unsettable") tec = Mock() with patch( "tox_lsr.hooks.prop_is_set", side_effect=mock_unsettable_is_set ): with patch("tox_lsr.hooks.setattr", side_effect=AttributeError()): merge_envconf(tec, def_tec) self.assertNotEqual(tec.unsettable, "unsettable") # test setting an unset property prop = "propa" def mock_prop_is_set(envconf, propname): if propname != prop: return False if envconf == def_tec: return True return False unittest_mock.FILTER_DIR = ( False # for handling attributes that start with underscore ) def_tec = Mock(spec=[prop], propa=prop, _ignoreme="ignoreme") tec = Mock(spec=[prop]) with patch("tox_lsr.hooks.prop_is_set", side_effect=mock_prop_is_set): merge_envconf(tec, def_tec) unittest_mock.FILTER_DIR = True # reset to default self.assertEqual(prop, tec.propa) # test that it tries to merge if both props are set # pylint: 
disable=unused-argument def mock_prop_is_set2(envconf, propname): if propname != prop: return False return True def_tec = Mock(spec=[prop], propa=prop) tec = Mock(spec=[prop], propa="someothervalue") with patch("tox_lsr.hooks.prop_is_set", side_effect=mock_prop_is_set2): with patch("tox_lsr.hooks.merge_prop_values") as mock_mpv: merge_envconf(tec, def_tec) self.assertEqual(1, mock_mpv.call_count) self.assertEqual("someothervalue", tec.propa) def test_tox_merge_config(self): """Test the merge_config method.""" tox_attrs = { "a": "a", "b": "b", } tec = Mock() tec._cfg = Mock() tec._cfg.sections = deepcopy({"tox": tox_attrs}) tec.configure_mock(**deepcopy(tox_attrs)) tec.envlist_explicit = False tec.envlist = ["a", "b"] tec.envlist_default = ["a", "b"] enva = {} envb = {} tec.envconfigs = {"a": enva, "b": envb} def_tox_attrs = {"a": "b", "b": "c", "c": "d", "_skip": "skip"} unittest_mock.FILTER_DIR = ( False # for handling attributes that start with underscore ) def_tec = Mock() def_tec._cfg = Mock() def_tec._cfg.sections = deepcopy({"tox": def_tox_attrs}) def_tec.configure_mock(**deepcopy(def_tox_attrs)) def_tec.envlist = ["b", "c"] def_tec.envlist_default = ["b", "c"] envc = {} def_tec.envconfigs = {"b": {}, "c": envc} with patch("tox_lsr.hooks.merge_envconf") as mock_me: merge_config(tec, def_tec) self.assertEqual(1, mock_me.call_count) self.assertIs(enva, tec.envconfigs["a"]) self.assertIs(envb, tec.envconfigs["b"]) self.assertIs(envc, tec.envconfigs["c"]) self.assertEqual("a", tec.a) self.assertEqual("b", tec.b) self.assertEqual("d", tec.c) self.assertEqual(set(["a", "b", "c"]), set(tec.envlist)) self.assertEqual(set(["a", "b", "c"]), set(tec.envlist_default)) unittest_mock.FILTER_DIR = True # reset def test_tox_set_set_prop_values_ini(self): """Test set_prop_values_ini.""" conf = {"a": "a", "b": "b"} def_conf = {} set_prop_values_ini("a", def_conf, conf) self.assertEqual({"a": "a"}, def_conf) set_prop_values_ini("a", def_conf, conf) self.assertEqual({"a": "a"}, 
def_conf) set_prop_values_ini("b", def_conf, conf) self.assertEqual({"a": "a", "b": "b"}, def_conf) set_prop_values_ini("a", def_conf, conf) self.assertEqual({"a": "a", "b": "b"}, def_conf) conf = { "setenv": "a\nb", "deps": "a", "passenv": "TEST_*", "whitelist_externals": "mycmd\nmyothercmd", } def_conf = { "setenv": "c\nd", "deps": "b", "passenv": "*", "whitelist_externals": "bash", } set_prop_values_ini("setenv", def_conf, conf) self.assertEqual("c\nd\na\nb", def_conf["setenv"]) set_prop_values_ini("deps", def_conf, conf) self.assertEqual("b\na", def_conf["deps"]) set_prop_values_ini("passenv", def_conf, conf) self.assertEqual("*\nTEST_*", def_conf["passenv"]) set_prop_values_ini("whitelist_externals", def_conf, conf) self.assertEqual( "bash\nmycmd\nmyothercmd", def_conf["whitelist_externals"] ) def test_lsr_path(self): """Test the _LSRPath class.""" real = "/no/such/path/to/realfile" temp = "/no/such/path/to/temp" stack_plain = (("myfile", 1, "myfunc", "text"),) stack_iniconfig = (("/path/to/iniconfig.py", 1, "__init__", "text"),) with patch("traceback.extract_stack", return_value=stack_plain): lsr = _LSRPath(real, temp) self.assertEqual(real, str(lsr)) with patch("traceback.extract_stack", return_value=stack_iniconfig): lsr = _LSRPath(real, temp) self.assertEqual(temp, str(lsr)) def test_is_lsr_enabled(self): """Test is_lsr_enabled.""" config = MockConfig({}) config._cfg.get = Mock(return_value="false") self.assertFalse(is_lsr_enabled(config)) config._cfg.sections[LSR_CONFIG_SECTION] = {} self.assertFalse(is_lsr_enabled(config)) self.assertFalse(is_lsr_enabled(config)) config._cfg.get = Mock(return_value="true") self.assertTrue(is_lsr_enabled(config)) config._cfg.get = Mock(return_value="true") os.environ[LSR_ENABLE_ENV] = "false" self.assertFalse(is_lsr_enabled(config)) config._cfg.get = Mock(return_value="false") os.environ[LSR_ENABLE_ENV] = "true" self.assertTrue(is_lsr_enabled(config)) config = MockConfig() config._cfg.get = Mock(return_value="false") 
os.environ[LSR_ENABLE_ENV] = "false" setattr(config.option, LSR_ENABLE, True) self.assertTrue(is_lsr_enabled(config)) config._cfg.get = Mock(return_value="true") os.environ[LSR_ENABLE_ENV] = "true" setattr(config.option, LSR_ENABLE, False) self.assertFalse(is_lsr_enabled(config)) del os.environ[LSR_ENABLE_ENV]
36.293144
79
0.572108
import os import shutil import tempfile try: from unittest import mock as unittest_mock from unittest.mock import MagicMock, Mock, patch except ImportError: import mock as unittest_mock from mock import MagicMock, Mock, patch from copy import deepcopy import pkg_resources import py.iniconfig import unittest2 from tox_lsr.hooks import ( CONFIG_FILES_SUBDIR, LSR_CONFIG_SECTION, LSR_ENABLE, LSR_ENABLE_ENV, SCRIPT_NAME, TOX_DEFAULT_INI, _LSRPath, is_lsr_enabled, merge_config, merge_envconf, merge_ini, merge_prop_values, prop_is_set, set_prop_values_ini, tox_addoption, tox_configure, ) from .utils import MockConfig class HooksTestCase(unittest2.TestCase): def setUp(self): self.toxworkdir = tempfile.mkdtemp() patch( "pkg_resources.resource_filename", return_value=self.toxworkdir + "/" + SCRIPT_NAME, ).start() self.default_tox_ini_b = pkg_resources.resource_string( "tox_lsr", CONFIG_FILES_SUBDIR + "/" + TOX_DEFAULT_INI ) self.default_tox_ini_raw = self.default_tox_ini_b.decode() self.tests_path = os.path.dirname( os.path.dirname(os.path.abspath(__file__)) ) self.fixture_path = os.path.join( self.tests_path, "fixtures", self.id().split(".")[-1] ) def tearDown(self): shutil.rmtree(self.toxworkdir) patch.stopall() def test_tox_addoption(self): parser = Mock(add_argument=Mock()) tox_addoption(parser) self.assertEqual(1, parser.add_argument.call_count) def test_tox_configure(self): config = MockConfig(toxworkdir=self.toxworkdir) with patch( "tox_lsr.hooks.is_lsr_enabled", return_value=False ) as mock_ile: tox_configure(config) self.assertEqual(1, mock_ile.call_count) setattr(config.option, LSR_ENABLE, True) default_config = MockConfig(toxworkdir=self.toxworkdir) with patch( "pkg_resources.resource_string", return_value=self.default_tox_ini_b, ) as mock_rs: with patch("tox_lsr.hooks.merge_config") as mock_mc: with patch( "tox_lsr.hooks.merge_ini", return_value=self.default_tox_ini_raw, ) as mock_mi: with patch( "tox_lsr.hooks.Config", side_effect=[TypeError(), default_config], 
) as mock_cfg: with patch( "tox_lsr.hooks.ParseIni", side_effect=[TypeError(), None], ) as mock_pi: tox_configure(config) self.assertEqual(1, mock_rs.call_count) self.assertEqual(2, mock_pi.call_count) self.assertEqual(1, mock_mc.call_count) self.assertEqual(1, mock_mi.call_count) self.assertEqual(2, mock_cfg.call_count) def test_tox_merge_ini(self): config = MockConfig(toxworkdir=self.toxworkdir) tox_ini_file = os.path.join(self.fixture_path, "tox.ini") config._cfg = py.iniconfig.IniConfig(tox_ini_file) result = merge_ini(config, self.default_tox_ini_raw) expected_file = os.path.join(self.fixture_path, "result.ini") expected_ini = py.iniconfig.IniConfig(expected_file) result_ini = py.iniconfig.IniConfig("", result) self.assertDictEqual(expected_ini.sections, result_ini.sections) def test_tox_prop_is_set(self): tec = Mock(envname="prop") tec._reader = Mock() tec._reader._cfg = Mock() cfgdict = { "empty_str_prop": "", "str_prop": "str_prop", "int_prop": 0, "bool_prop": False, "float_prop": 0.0, "list_prop": [1, 2, 3], "empty_list_prop": [], "dict_prop": {"a": "a"}, "empty_dict_prop": {}, "obj_prop": object(), "none_prop": None, } tec._reader._cfg.sections = deepcopy({"testenv": cfgdict}) for prop in cfgdict: self.assertTrue(prop_is_set(tec, prop)) tec._reader._cfg.sections["testenv:prop"] = deepcopy(cfgdict) for prop in cfgdict: self.assertTrue(prop_is_set(tec, prop)) del tec._reader._cfg.sections["testenv"] del tec._reader._cfg.sections["testenv:prop"] tec.configure_mock(**deepcopy(cfgdict)) for prop in cfgdict: self.assertFalse(prop_is_set(tec, prop)) def test_tox_merge_prop_values(self): tec = MagicMock() def_tec = MagicMock() merge_prop_values("nosuchprop", tec, def_tec) self.assertFalse(tec.mock_calls) self.assertFalse(def_tec.mock_calls) tec = MagicMock() def_tec = MagicMock() propnames = ["setenv", "deps", "passenv", "whitelist_externals"] empty_attrs = { "setenv": {}, "deps": [], "passenv": set(), "whitelist_externals": [], } 
tec.configure_mock(**deepcopy(empty_attrs)) full_attrs = { "setenv": {"a": "a", "b": "b"}, "deps": ["a", "b"], "passenv": set(["a", "b"]), "whitelist_externals": ["a", "b"], } def_tec.configure_mock(**deepcopy(full_attrs)) for prop in propnames: merge_prop_values(prop, tec, def_tec) for prop in propnames: val = getattr(tec, prop) exp_val = full_attrs[prop] if isinstance(val, list): self.assertEqual(set(exp_val), set(val)) else: self.assertEqual(exp_val, val) tec = MagicMock() def_tec = MagicMock() tec.configure_mock(**deepcopy(full_attrs)) def_tec.configure_mock(**deepcopy(empty_attrs)) for prop in propnames: merge_prop_values(prop, tec, def_tec) for prop in propnames: val = getattr(tec, prop) exp_val = full_attrs[prop] if isinstance(val, list): self.assertEqual(set(exp_val), set(val)) else: self.assertEqual(exp_val, val) more_attrs = { "setenv": {"a": "a", "c": "c"}, "deps": ["a", "c"], "passenv": set(["a", "c"]), "whitelist_externals": ["a", "c"], } result_attrs = { "setenv": {"a": "a", "b": "b", "c": "c"}, "deps": ["a", "b", "c"], "passenv": set(["a", "b", "c"]), "whitelist_externals": ["a", "b", "c"], } tec = MagicMock() def_tec = MagicMock() tec.configure_mock(**deepcopy(full_attrs)) def_tec.configure_mock(**deepcopy(more_attrs)) for prop in propnames: merge_prop_values(prop, tec, def_tec) for prop in propnames: val = getattr(tec, prop) exp_val = result_attrs[prop] if isinstance(val, list): self.assertEqual(set(exp_val), set(val)) else: self.assertEqual(exp_val, val) def test_tox_merge_envconf(self): prop = "unsettable" def mock_unsettable_is_set(envconf, propname): if propname != prop: return False if envconf == def_tec: return True return False def_tec = Mock(unsettable="unsettable") tec = Mock() with patch( "tox_lsr.hooks.prop_is_set", side_effect=mock_unsettable_is_set ): with patch("tox_lsr.hooks.setattr", side_effect=AttributeError()): merge_envconf(tec, def_tec) self.assertNotEqual(tec.unsettable, "unsettable") prop = "propa" def 
mock_prop_is_set(envconf, propname): if propname != prop: return False if envconf == def_tec: return True return False unittest_mock.FILTER_DIR = ( False ) def_tec = Mock(spec=[prop], propa=prop, _ignoreme="ignoreme") tec = Mock(spec=[prop]) with patch("tox_lsr.hooks.prop_is_set", side_effect=mock_prop_is_set): merge_envconf(tec, def_tec) unittest_mock.FILTER_DIR = True self.assertEqual(prop, tec.propa) def mock_prop_is_set2(envconf, propname): if propname != prop: return False return True def_tec = Mock(spec=[prop], propa=prop) tec = Mock(spec=[prop], propa="someothervalue") with patch("tox_lsr.hooks.prop_is_set", side_effect=mock_prop_is_set2): with patch("tox_lsr.hooks.merge_prop_values") as mock_mpv: merge_envconf(tec, def_tec) self.assertEqual(1, mock_mpv.call_count) self.assertEqual("someothervalue", tec.propa) def test_tox_merge_config(self): tox_attrs = { "a": "a", "b": "b", } tec = Mock() tec._cfg = Mock() tec._cfg.sections = deepcopy({"tox": tox_attrs}) tec.configure_mock(**deepcopy(tox_attrs)) tec.envlist_explicit = False tec.envlist = ["a", "b"] tec.envlist_default = ["a", "b"] enva = {} envb = {} tec.envconfigs = {"a": enva, "b": envb} def_tox_attrs = {"a": "b", "b": "c", "c": "d", "_skip": "skip"} unittest_mock.FILTER_DIR = ( False ) def_tec = Mock() def_tec._cfg = Mock() def_tec._cfg.sections = deepcopy({"tox": def_tox_attrs}) def_tec.configure_mock(**deepcopy(def_tox_attrs)) def_tec.envlist = ["b", "c"] def_tec.envlist_default = ["b", "c"] envc = {} def_tec.envconfigs = {"b": {}, "c": envc} with patch("tox_lsr.hooks.merge_envconf") as mock_me: merge_config(tec, def_tec) self.assertEqual(1, mock_me.call_count) self.assertIs(enva, tec.envconfigs["a"]) self.assertIs(envb, tec.envconfigs["b"]) self.assertIs(envc, tec.envconfigs["c"]) self.assertEqual("a", tec.a) self.assertEqual("b", tec.b) self.assertEqual("d", tec.c) self.assertEqual(set(["a", "b", "c"]), set(tec.envlist)) self.assertEqual(set(["a", "b", "c"]), set(tec.envlist_default)) 
unittest_mock.FILTER_DIR = True def test_tox_set_set_prop_values_ini(self): conf = {"a": "a", "b": "b"} def_conf = {} set_prop_values_ini("a", def_conf, conf) self.assertEqual({"a": "a"}, def_conf) set_prop_values_ini("a", def_conf, conf) self.assertEqual({"a": "a"}, def_conf) set_prop_values_ini("b", def_conf, conf) self.assertEqual({"a": "a", "b": "b"}, def_conf) set_prop_values_ini("a", def_conf, conf) self.assertEqual({"a": "a", "b": "b"}, def_conf) conf = { "setenv": "a\nb", "deps": "a", "passenv": "TEST_*", "whitelist_externals": "mycmd\nmyothercmd", } def_conf = { "setenv": "c\nd", "deps": "b", "passenv": "*", "whitelist_externals": "bash", } set_prop_values_ini("setenv", def_conf, conf) self.assertEqual("c\nd\na\nb", def_conf["setenv"]) set_prop_values_ini("deps", def_conf, conf) self.assertEqual("b\na", def_conf["deps"]) set_prop_values_ini("passenv", def_conf, conf) self.assertEqual("*\nTEST_*", def_conf["passenv"]) set_prop_values_ini("whitelist_externals", def_conf, conf) self.assertEqual( "bash\nmycmd\nmyothercmd", def_conf["whitelist_externals"] ) def test_lsr_path(self): real = "/no/such/path/to/realfile" temp = "/no/such/path/to/temp" stack_plain = (("myfile", 1, "myfunc", "text"),) stack_iniconfig = (("/path/to/iniconfig.py", 1, "__init__", "text"),) with patch("traceback.extract_stack", return_value=stack_plain): lsr = _LSRPath(real, temp) self.assertEqual(real, str(lsr)) with patch("traceback.extract_stack", return_value=stack_iniconfig): lsr = _LSRPath(real, temp) self.assertEqual(temp, str(lsr)) def test_is_lsr_enabled(self): config = MockConfig({}) config._cfg.get = Mock(return_value="false") self.assertFalse(is_lsr_enabled(config)) config._cfg.sections[LSR_CONFIG_SECTION] = {} self.assertFalse(is_lsr_enabled(config)) self.assertFalse(is_lsr_enabled(config)) config._cfg.get = Mock(return_value="true") self.assertTrue(is_lsr_enabled(config)) config._cfg.get = Mock(return_value="true") os.environ[LSR_ENABLE_ENV] = "false" 
self.assertFalse(is_lsr_enabled(config)) config._cfg.get = Mock(return_value="false") os.environ[LSR_ENABLE_ENV] = "true" self.assertTrue(is_lsr_enabled(config)) config = MockConfig() config._cfg.get = Mock(return_value="false") os.environ[LSR_ENABLE_ENV] = "false" setattr(config.option, LSR_ENABLE, True) self.assertTrue(is_lsr_enabled(config)) config._cfg.get = Mock(return_value="true") os.environ[LSR_ENABLE_ENV] = "true" setattr(config.option, LSR_ENABLE, False) self.assertFalse(is_lsr_enabled(config)) del os.environ[LSR_ENABLE_ENV]
true
true
1c45e7af81826e60de1299cc184fcbaf42464f56
3,170
py
Python
lists/tests/test_models.py
brendanodwyer/python-tdd-book
ff3a8a8254a3112937ce9924dfa05ba52069c8bf
[ "Apache-2.0" ]
null
null
null
lists/tests/test_models.py
brendanodwyer/python-tdd-book
ff3a8a8254a3112937ce9924dfa05ba52069c8bf
[ "Apache-2.0" ]
null
null
null
lists/tests/test_models.py
brendanodwyer/python-tdd-book
ff3a8a8254a3112937ce9924dfa05ba52069c8bf
[ "Apache-2.0" ]
null
null
null
from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError from django.test import TestCase from lists.models import Item from lists.models import List User = get_user_model() class ItemModelTest(TestCase): def test_default_text(self): item = Item() self.assertEqual(item.text, "") class ListModelTest(TestCase): def test_item_is_related_to_list(self): list_ = List.objects.create() item = Item() item.list = list_ item.save() self.assertIn(item, list_.item_set.all()) def test_cannot_save_empty_list_items(self): list_ = List.objects.create() item = Item(list=list_, text="") with self.assertRaises(ValidationError): item.save() item.full_clean() def test_get_absolute_url(self): list_ = List.objects.create() self.assertEqual(list_.get_absolute_url(), f"/lists/{list_.id}/") def test_duplicate_items_are_invalid(self): list_ = List.objects.create() Item.objects.create(list=list_, text="bla") with self.assertRaises(ValidationError): item = Item(list=list_, text="bla") item.full_clean() def test_CAN_save_same_item_to_different_lists(self): list1 = List.objects.create() list2 = List.objects.create() Item.objects.create(list=list1, text="bla") item = Item(list=list2, text="bla") item.full_clean() # should not raise def test_list_ordering(self): list1 = List.objects.create() item1 = Item.objects.create(list=list1, text="i1") item2 = Item.objects.create(list=list1, text="i2") item3 = Item.objects.create(list=list1, text="i3") self.assertEqual(list(Item.objects.all()), [item1, item2, item3]) def test_string_representation(self): item = Item(text="some text") self.assertEqual(str(item), "some text") def test_create_new_creates_lists_and_first_item(self): List.create_new(first_item_text="new item text") new_item = Item.objects.first() self.assertEqual(new_item.text, "new item text") new_list = List.objects.first() self.assertEqual(new_item.list, new_list) def test_create_new_optionally_saves_owner(self): user = User.objects.create() 
List.create_new(first_item_text="new item text", owner=user) new_list = List.objects.first() self.assertEqual(new_list.owner, user) def test_lists_can_have_owners(self): List(owner=User()) # Should not raise def test_lists_owner_is_optinal(self): List().full_clean() # Should not raise def test_create_returns_new_list_object(self): returned = List.create_new(first_item_text="new item text") new_list = List.objects.first() self.assertEqual(returned, new_list) def test_list_name_is_first_item_text(self): new_list = List.objects.create() Item.objects.create(list=new_list, text="first item") Item.objects.create(list=new_list, text="second item") self.assertEqual(new_list.name, "first item")
34.086022
73
0.667508
from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError from django.test import TestCase from lists.models import Item from lists.models import List User = get_user_model() class ItemModelTest(TestCase): def test_default_text(self): item = Item() self.assertEqual(item.text, "") class ListModelTest(TestCase): def test_item_is_related_to_list(self): list_ = List.objects.create() item = Item() item.list = list_ item.save() self.assertIn(item, list_.item_set.all()) def test_cannot_save_empty_list_items(self): list_ = List.objects.create() item = Item(list=list_, text="") with self.assertRaises(ValidationError): item.save() item.full_clean() def test_get_absolute_url(self): list_ = List.objects.create() self.assertEqual(list_.get_absolute_url(), f"/lists/{list_.id}/") def test_duplicate_items_are_invalid(self): list_ = List.objects.create() Item.objects.create(list=list_, text="bla") with self.assertRaises(ValidationError): item = Item(list=list_, text="bla") item.full_clean() def test_CAN_save_same_item_to_different_lists(self): list1 = List.objects.create() list2 = List.objects.create() Item.objects.create(list=list1, text="bla") item = Item(list=list2, text="bla") item.full_clean() def test_list_ordering(self): list1 = List.objects.create() item1 = Item.objects.create(list=list1, text="i1") item2 = Item.objects.create(list=list1, text="i2") item3 = Item.objects.create(list=list1, text="i3") self.assertEqual(list(Item.objects.all()), [item1, item2, item3]) def test_string_representation(self): item = Item(text="some text") self.assertEqual(str(item), "some text") def test_create_new_creates_lists_and_first_item(self): List.create_new(first_item_text="new item text") new_item = Item.objects.first() self.assertEqual(new_item.text, "new item text") new_list = List.objects.first() self.assertEqual(new_item.list, new_list) def test_create_new_optionally_saves_owner(self): user = User.objects.create() 
List.create_new(first_item_text="new item text", owner=user) new_list = List.objects.first() self.assertEqual(new_list.owner, user) def test_lists_can_have_owners(self): List(owner=User()) def test_lists_owner_is_optinal(self): List().full_clean() def test_create_returns_new_list_object(self): returned = List.create_new(first_item_text="new item text") new_list = List.objects.first() self.assertEqual(returned, new_list) def test_list_name_is_first_item_text(self): new_list = List.objects.create() Item.objects.create(list=new_list, text="first item") Item.objects.create(list=new_list, text="second item") self.assertEqual(new_list.name, "first item")
true
true
1c45e7bb27a310e77f8849bbd05bd8e62fc757bb
12,127
py
Python
core/domain/caching_services.py
prayutsu/oppia
e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786
[ "Apache-2.0" ]
2
2020-10-13T12:59:08.000Z
2020-10-13T17:10:26.000Z
core/domain/caching_services.py
gitter-badger/oppia
7d8e659264582d7ce74bc6c139e597b82bca0e04
[ "Apache-2.0" ]
35
2019-02-23T20:31:21.000Z
2019-08-19T12:32:13.000Z
core/domain/caching_services.py
gitter-badger/oppia
7d8e659264582d7ce74bc6c139e597b82bca0e04
[ "Apache-2.0" ]
1
2021-08-13T07:54:56.000Z
2021-08-13T07:54:56.000Z
# coding: utf-8 # # Copyright 2020 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Service functions to set and retrieve data from the memory cache.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import json from core.domain import collection_domain from core.domain import exp_domain from core.domain import platform_parameter_domain from core.domain import skill_domain from core.domain import story_domain from core.domain import topic_domain from core.platform import models import python_utils memory_cache_services = models.Registry.import_cache_services() # NOTE: Namespaces and sub-namespaces cannot contain ':' because this is used as # an internal delimiter for cache keys that separates the namespace, the # sub-namespace, and the id in the cache keys. MEMCACHE_KEY_DELIMITER = ':' # This namespace supports sub-namespaces which are identified by the stringified # version number of the explorations within the sub-namespace. The value for # each key in this namespace should be a serialized representation of an # Exploration. There is also a special sub-namespace represented by the empty # string; this sub-namespace stores the latest version of the exploration. 
CACHE_NAMESPACE_EXPLORATION = 'exploration' # This namespace supports sub-namespaces which are identified by the stringified # version number of the collections within the sub-namespace. The value for # each key in this namespace should be a serialized representation of a # Collection. There is also a special sub-namespace represented by the empty # string; this sub-namespace stores the latest version of the collection. CACHE_NAMESPACE_COLLECTION = 'collection' # This namespace supports sub-namespaces which are identified by the stringified # version number of the skills within the sub-namespace. The value for # each key in this namespace should be a serialized representation of a # Skill. There is also a special sub-namespace represented by the empty # string; this sub-namespace stores the latest version of the skill. CACHE_NAMESPACE_SKILL = 'skill' # This namespace supports sub-namespaces which are identified by the stringified # version number of the stories within the sub-namespace. The value for # each key in this namespace should be a serialized representation of a # Story. There is also a special sub-namespace represented by the empty # string; this sub-namespace stores the latest version of the story. CACHE_NAMESPACE_STORY = 'story' # This namespace supports sub-namespaces which are identified by the stringified # version number of the topics within the sub-namespace. The value for # each key in this namespace should be a serialized representation of a # Topic. There is also a special sub-namespace represented by the empty # string; this sub-namespace stores the latest version of the topic. CACHE_NAMESPACE_TOPIC = 'topic' # This namespace supports sub-namespaces which are identified by the stringified # version number of the topics within the sub-namespace. The value for # each key in this namespace should be a serialized representation of a # Platform Parameter. This namespace does not support sub-namespaces. 
CACHE_NAMESPACE_PLATFORM_PARAMETER = 'platform' # The value for each key in this namespace should be a serialized representation # of a ConfigPropertyModel value (the 'value' attribute of a ConfigPropertyModel # object). This namespace does not support sub-namespaces. CACHE_NAMESPACE_CONFIG = 'config' # The sub-namespace is not necessary for the default namespace. The namespace # handles default datatypes allowed by Redis including Strings, Lists, Sets, # and Hashes. More details can be found at: https://redis.io/topics/data-types. CACHE_NAMESPACE_DEFAULT = 'default' DESERIALIZATION_FUNCTIONS = { CACHE_NAMESPACE_COLLECTION: collection_domain.Collection.deserialize, CACHE_NAMESPACE_EXPLORATION: exp_domain.Exploration.deserialize, CACHE_NAMESPACE_SKILL: skill_domain.Skill.deserialize, CACHE_NAMESPACE_STORY: story_domain.Story.deserialize, CACHE_NAMESPACE_TOPIC: topic_domain.Topic.deserialize, CACHE_NAMESPACE_PLATFORM_PARAMETER: ( platform_parameter_domain.PlatformParameter.deserialize), CACHE_NAMESPACE_CONFIG: lambda x: json.loads(x.decode('utf-8')), CACHE_NAMESPACE_DEFAULT: lambda x: json.loads(x.decode('utf-8')) } SERIALIZATION_FUNCTIONS = { CACHE_NAMESPACE_COLLECTION: lambda x: x.serialize(), CACHE_NAMESPACE_EXPLORATION: lambda x: x.serialize(), CACHE_NAMESPACE_SKILL: lambda x: x.serialize(), CACHE_NAMESPACE_STORY: lambda x: x.serialize(), CACHE_NAMESPACE_TOPIC: lambda x: x.serialize(), CACHE_NAMESPACE_PLATFORM_PARAMETER: lambda x: x.serialize(), CACHE_NAMESPACE_CONFIG: lambda x: json.dumps(x).encode('utf-8'), CACHE_NAMESPACE_DEFAULT: lambda x: json.dumps(x).encode('utf-8') } def _get_memcache_key(namespace, sub_namespace, obj_id): """Returns a memcache key for the class under the corresponding namespace and sub_namespace. Args: namespace: str. The namespace under which the values associated with the id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for ids that are not associated with a conceptual domain-layer entity and therefore don't require serialization. 
sub_namespace: str|None. The sub-namespace further differentiates the values. For Explorations, Skills, Stories, Topics, and Collections, the sub-namespace is the stringified version number of the objects. obj_id: str. The id of the value to store in the memory cache. Raises: Exception. The sub-namespace contains a ':'. Returns: str. The generated key for use in the memory cache in order to differentiate a passed-in key based on namespace and sub-namespace. """ sub_namespace_key_string = (sub_namespace or '') if MEMCACHE_KEY_DELIMITER in sub_namespace_key_string: raise ValueError( 'Sub-namespace %s cannot contain \':\'.' % sub_namespace_key_string) return '%s%s%s%s%s' % ( namespace, MEMCACHE_KEY_DELIMITER, sub_namespace_key_string, MEMCACHE_KEY_DELIMITER, obj_id) def flush_memory_cache(): """Flushes the memory cache by wiping all of the data.""" memory_cache_services.flush_cache() def get_multi(namespace, sub_namespace, obj_ids): """Get a dictionary of the {id, value} pairs from the memory cache. Args: namespace: str. The namespace under which the values associated with these object ids lie. The namespace determines how the objects are decoded from their JSON-encoded string. Use CACHE_NAMESPACE_DEFAULT as the namespace for objects that are not associated with a conceptual domain-layer entity and therefore don't require serialization. sub_namespace: str|None. The sub-namespace further differentiates the values. For Explorations, Skills, Stories, Topics, and Collections, the sub-namespace is either None or the stringified version number of the objects. If the sub-namespace is not required, pass in None. obj_ids: list(str). List of object ids corresponding to values to retrieve from the cache. Raises: ValueError. The namespace does not exist or is not recognized. Returns: dict(str, Exploration|Skill|Story|Topic|Collection|str). Dictionary of decoded (id, value) pairs retrieved from the platform caching service. 
""" result_dict = {} if len(obj_ids) == 0: return result_dict if namespace not in DESERIALIZATION_FUNCTIONS: raise ValueError('Invalid namespace: %s.' % namespace) memcache_keys = [ _get_memcache_key(namespace, sub_namespace, obj_id) for obj_id in obj_ids] values = memory_cache_services.get_multi(memcache_keys) for obj_id, value in python_utils.ZIP(obj_ids, values): if value: result_dict[obj_id] = DESERIALIZATION_FUNCTIONS[namespace](value) return result_dict def set_multi(namespace, sub_namespace, id_value_mapping): """Set multiple id values at once to the cache, where the values are all of a specific namespace type or a Redis compatible type (more details here: https://redis.io/topics/data-types). Args: namespace: str. The namespace under which the values associated with the id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for objects that are not associated with a conceptual domain-layer entity and therefore don't require serialization. sub_namespace: str|None. The sub-namespace further differentiates the values. For Explorations, Skills, Stories, Topics, and Collections, the sub-namespace is either None or the stringified version number of the objects. If the sub-namespace is not required, pass in None. id_value_mapping: dict(str, Exploration|Skill|Story|Topic|Collection|str). A dict of {id, value} pairs to set to the cache. Raises: ValueError. The namespace does not exist or is not recognized. Returns: bool. Whether all operations complete successfully. """ if len(id_value_mapping) == 0: return True if namespace not in SERIALIZATION_FUNCTIONS: raise ValueError('Invalid namespace: %s.' % namespace) memory_cache_id_value_mapping = ( { _get_memcache_key(namespace, sub_namespace, obj_id): SERIALIZATION_FUNCTIONS[namespace](value) for obj_id, value in id_value_mapping.items() }) return memory_cache_services.set_multi(memory_cache_id_value_mapping) def delete_multi(namespace, sub_namespace, obj_ids): """Deletes multiple ids in the cache. Args: namespace: str. 
The namespace under which the values associated with the id lie. Use CACHE_NAMESPACE_DEFAULT namespace for object ids that are not associated with a conceptual domain-layer entity and therefore don't require serialization. sub_namespace: str|None. The sub-namespace further differentiates the values. For Explorations, Skills, Stories, Topics, and Collections, the sub-namespace is either None or the stringified version number of the objects. If the sub-namespace is not required, pass in None. obj_ids: list(str). A list of id strings to delete from the cache. Raises: ValueError. The namespace does not exist or is not recognized. Returns: bool. Whether all operations complete successfully. """ if len(obj_ids) == 0: return True if namespace not in DESERIALIZATION_FUNCTIONS: raise ValueError('Invalid namespace: %s.' % namespace) memcache_keys = [ _get_memcache_key(namespace, sub_namespace, obj_id) for obj_id in obj_ids] return memory_cache_services.delete_multi(memcache_keys) == len(obj_ids) def get_memory_cache_stats(): """Get a memory profile of the cache in a dictionary dependent on how the caching service profiles its own cache. Returns: MemoryCacheStats. MemoryCacheStats object containing the total allocated memory in bytes, peak memory usage in bytes, and the total number of keys stored as values. """ return memory_cache_services.get_memory_cache_stats()
45.762264
80
0.740002
from __future__ import absolute_import from __future__ import unicode_literals import json from core.domain import collection_domain from core.domain import exp_domain from core.domain import platform_parameter_domain from core.domain import skill_domain from core.domain import story_domain from core.domain import topic_domain from core.platform import models import python_utils memory_cache_services = models.Registry.import_cache_services() MEMCACHE_KEY_DELIMITER = ':' CACHE_NAMESPACE_EXPLORATION = 'exploration' CACHE_NAMESPACE_COLLECTION = 'collection' CACHE_NAMESPACE_SKILL = 'skill' CACHE_NAMESPACE_STORY = 'story' CACHE_NAMESPACE_TOPIC = 'topic' CACHE_NAMESPACE_PLATFORM_PARAMETER = 'platform' CACHE_NAMESPACE_CONFIG = 'config' CACHE_NAMESPACE_DEFAULT = 'default' DESERIALIZATION_FUNCTIONS = { CACHE_NAMESPACE_COLLECTION: collection_domain.Collection.deserialize, CACHE_NAMESPACE_EXPLORATION: exp_domain.Exploration.deserialize, CACHE_NAMESPACE_SKILL: skill_domain.Skill.deserialize, CACHE_NAMESPACE_STORY: story_domain.Story.deserialize, CACHE_NAMESPACE_TOPIC: topic_domain.Topic.deserialize, CACHE_NAMESPACE_PLATFORM_PARAMETER: ( platform_parameter_domain.PlatformParameter.deserialize), CACHE_NAMESPACE_CONFIG: lambda x: json.loads(x.decode('utf-8')), CACHE_NAMESPACE_DEFAULT: lambda x: json.loads(x.decode('utf-8')) } SERIALIZATION_FUNCTIONS = { CACHE_NAMESPACE_COLLECTION: lambda x: x.serialize(), CACHE_NAMESPACE_EXPLORATION: lambda x: x.serialize(), CACHE_NAMESPACE_SKILL: lambda x: x.serialize(), CACHE_NAMESPACE_STORY: lambda x: x.serialize(), CACHE_NAMESPACE_TOPIC: lambda x: x.serialize(), CACHE_NAMESPACE_PLATFORM_PARAMETER: lambda x: x.serialize(), CACHE_NAMESPACE_CONFIG: lambda x: json.dumps(x).encode('utf-8'), CACHE_NAMESPACE_DEFAULT: lambda x: json.dumps(x).encode('utf-8') } def _get_memcache_key(namespace, sub_namespace, obj_id): sub_namespace_key_string = (sub_namespace or '') if MEMCACHE_KEY_DELIMITER in sub_namespace_key_string: raise ValueError( 'Sub-namespace 
%s cannot contain \':\'.' % sub_namespace_key_string) return '%s%s%s%s%s' % ( namespace, MEMCACHE_KEY_DELIMITER, sub_namespace_key_string, MEMCACHE_KEY_DELIMITER, obj_id) def flush_memory_cache(): memory_cache_services.flush_cache() def get_multi(namespace, sub_namespace, obj_ids): result_dict = {} if len(obj_ids) == 0: return result_dict if namespace not in DESERIALIZATION_FUNCTIONS: raise ValueError('Invalid namespace: %s.' % namespace) memcache_keys = [ _get_memcache_key(namespace, sub_namespace, obj_id) for obj_id in obj_ids] values = memory_cache_services.get_multi(memcache_keys) for obj_id, value in python_utils.ZIP(obj_ids, values): if value: result_dict[obj_id] = DESERIALIZATION_FUNCTIONS[namespace](value) return result_dict def set_multi(namespace, sub_namespace, id_value_mapping): if len(id_value_mapping) == 0: return True if namespace not in SERIALIZATION_FUNCTIONS: raise ValueError('Invalid namespace: %s.' % namespace) memory_cache_id_value_mapping = ( { _get_memcache_key(namespace, sub_namespace, obj_id): SERIALIZATION_FUNCTIONS[namespace](value) for obj_id, value in id_value_mapping.items() }) return memory_cache_services.set_multi(memory_cache_id_value_mapping) def delete_multi(namespace, sub_namespace, obj_ids): if len(obj_ids) == 0: return True if namespace not in DESERIALIZATION_FUNCTIONS: raise ValueError('Invalid namespace: %s.' % namespace) memcache_keys = [ _get_memcache_key(namespace, sub_namespace, obj_id) for obj_id in obj_ids] return memory_cache_services.delete_multi(memcache_keys) == len(obj_ids) def get_memory_cache_stats(): return memory_cache_services.get_memory_cache_stats()
true
true
1c45e7bf89b8194eb005c56d95a2d6c85d741f03
326
py
Python
pa05_find_max.py
jpch89/picalgo
73aa98e477c68bb39d337914065c0fe1b4bad756
[ "MIT" ]
null
null
null
pa05_find_max.py
jpch89/picalgo
73aa98e477c68bb39d337914065c0fe1b4bad756
[ "MIT" ]
null
null
null
pa05_find_max.py
jpch89/picalgo
73aa98e477c68bb39d337914065c0fe1b4bad756
[ "MIT" ]
null
null
null
def find_max(arr): if len(arr) == 1: return arr[0] elif len(arr) == 2: return arr[0] if arr[0] > arr[1] else arr[1] first = arr[0] second = find_max(arr[1:]) return first if first > second else second if __name__ == '__main__': arr = [1, 7, 4, 8] print(find_max(arr)) """ 8 """
17.157895
52
0.53681
def find_max(arr): if len(arr) == 1: return arr[0] elif len(arr) == 2: return arr[0] if arr[0] > arr[1] else arr[1] first = arr[0] second = find_max(arr[1:]) return first if first > second else second if __name__ == '__main__': arr = [1, 7, 4, 8] print(find_max(arr))
true
true
1c45e9e5f64d477f9fcc29374315d1729650f609
7,784
py
Python
tensorflow/tools/pip_package/setup.py
jjzhang166/tensorflow
61c0b39011671628ee85c2b49bc8845520018aa2
[ "Apache-2.0" ]
3
2017-05-31T01:33:48.000Z
2020-02-18T17:12:56.000Z
tensorflow/tools/pip_package/setup.py
jjzhang166/tensorflow
61c0b39011671628ee85c2b49bc8845520018aa2
[ "Apache-2.0" ]
null
null
null
tensorflow/tools/pip_package/setup.py
jjzhang166/tensorflow
61c0b39011671628ee85c2b49bc8845520018aa2
[ "Apache-2.0" ]
1
2018-12-28T12:55:11.000Z
2018-12-28T12:55:11.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import fnmatch import os import re import sys from setuptools import find_packages, setup, Command from setuptools.command.install import install as InstallCommandBase from setuptools.dist import Distribution # This version string is semver compatible, but incompatible with pip. # For pip, we will remove all '-' characters from this string, and use the # result for pip. 
_VERSION = '1.3.0' REQUIRED_PACKAGES = [ 'enum34 >= 1.1.6', 'numpy >= 1.12.1', 'six >= 1.10.0', 'protobuf >= 3.3.0', 'tensorflow-tensorboard >= 0.1.0, < 0.2.0', 'autograd >= 1.1.11', ] project_name = 'tensorflow' if '--project_name' in sys.argv: project_name_idx = sys.argv.index('--project_name') project_name = sys.argv[project_name_idx + 1] sys.argv.remove('--project_name') sys.argv.pop(project_name_idx) # python3 requires wheel 0.26 if sys.version_info.major == 3: REQUIRED_PACKAGES.append('wheel >= 0.26') else: REQUIRED_PACKAGES.append('wheel') # mock comes with unittest.mock for python3, need to install for python2 REQUIRED_PACKAGES.append('mock >= 2.0.0') # remove tensorboard from tf-nightly packages if 'tf_nightly' in project_name: for package in REQUIRED_PACKAGES: if 'tensorflow-tensorboard' in package: REQUIRED_PACKAGES.remove(package) break # weakref.finalize was introduced in Python 3.4 if sys.version_info < (3, 4): REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1') # pylint: disable=line-too-long CONSOLE_SCRIPTS = [ 'saved_model_cli = tensorflow.python.tools.saved_model_cli:main', # We need to keep the TensorBoard command, even though the console script # is now declared by the tensorboard pip package. If we remove the # TensorBoard command, pip will inappropriately remove it during install, # even though the command is not removed, just moved to a different wheel. 
'tensorboard = tensorboard.main:main', ] # pylint: enable=line-too-long # remove the tensorboard console script if building tf_nightly if 'tf_nightly' in project_name: CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:main') TEST_PACKAGES = [ 'scipy >= 0.15.1', ] class BinaryDistribution(Distribution): def has_ext_modules(self): return True class InstallCommand(InstallCommandBase): """Override the dir where the headers go.""" def finalize_options(self): ret = InstallCommandBase.finalize_options(self) self.install_headers = os.path.join(self.install_purelib, 'tensorflow', 'include') return ret class InstallHeaders(Command): """Override how headers are copied. The install_headers that comes with setuptools copies all files to the same directory. But we need the files to be in a specific directory hierarchy for -I <include_dir> to work correctly. """ description = 'install C/C++ header files' user_options = [('install-dir=', 'd', 'directory to install header files to'), ('force', 'f', 'force installation (overwrite existing files)'), ] boolean_options = ['force'] def initialize_options(self): self.install_dir = None self.force = 0 self.outfiles = [] def finalize_options(self): self.set_undefined_options('install', ('install_headers', 'install_dir'), ('force', 'force')) def mkdir_and_copy_file(self, header): install_dir = os.path.join(self.install_dir, os.path.dirname(header)) # Get rid of some extra intervening directories so we can have fewer # directories for -I install_dir = re.sub('/google/protobuf_archive/src', '', install_dir) # Copy eigen code into tensorflow/include. # A symlink would do, but the wheel file that gets created ignores # symlink within the directory hierarchy. # NOTE(keveman): Figure out how to customize bdist_wheel package so # we can do the symlink. 
if 'external/eigen_archive/' in install_dir: extra_dir = install_dir.replace('external/eigen_archive', '') if not os.path.exists(extra_dir): self.mkpath(extra_dir) self.copy_file(header, extra_dir) if not os.path.exists(install_dir): self.mkpath(install_dir) return self.copy_file(header, install_dir) def run(self): hdrs = self.distribution.headers if not hdrs: return self.mkpath(self.install_dir) for header in hdrs: (out, _) = self.mkdir_and_copy_file(header) self.outfiles.append(out) def get_inputs(self): return self.distribution.headers or [] def get_outputs(self): return self.outfiles def find_files(pattern, root): """Return all the files matching pattern below root dir.""" for path, _, files in os.walk(root): for filename in fnmatch.filter(files, pattern): yield os.path.join(path, filename) matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x] matches += ['../' + x for x in find_files('*', '_solib_k8') if '.py' not in x] if os.name == 'nt': EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd' else: EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so' headers = (list(find_files('*.h', 'tensorflow/core')) + list(find_files('*.h', 'tensorflow/stream_executor')) + list(find_files('*.h', 'google/protobuf_archive/src')) + list(find_files('*', 'third_party/eigen3')) + list(find_files('*', 'external/eigen_archive')) + list(find_files('*.h', 'external/nsync/public'))) setup( name=project_name, version=_VERSION.replace('-', ''), description='TensorFlow helps the tensors flow', long_description='', url='http://tensorflow.org/', author='Google Inc.', author_email='opensource@google.com', # Contained modules and scripts. packages=find_packages(), entry_points={ 'console_scripts': CONSOLE_SCRIPTS, }, headers=headers, install_requires=REQUIRED_PACKAGES, tests_require=REQUIRED_PACKAGES + TEST_PACKAGES, # Add in any packaged data. 
include_package_data=True, package_data={ 'tensorflow': [ EXTENSION_NAME, ] + matches, }, zip_safe=False, distclass=BinaryDistribution, cmdclass={ 'install_headers': InstallHeaders, 'install': InstallCommand, }, # PyPI package information. classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2.7', 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Software Development :: Libraries', ], license='Apache 2.0', keywords='tensorflow tensor machine learning',)
33.264957
80
0.675745
from __future__ import absolute_import from __future__ import division from __future__ import print_function import fnmatch import os import re import sys from setuptools import find_packages, setup, Command from setuptools.command.install import install as InstallCommandBase from setuptools.dist import Distribution _VERSION = '1.3.0' REQUIRED_PACKAGES = [ 'enum34 >= 1.1.6', 'numpy >= 1.12.1', 'six >= 1.10.0', 'protobuf >= 3.3.0', 'tensorflow-tensorboard >= 0.1.0, < 0.2.0', 'autograd >= 1.1.11', ] project_name = 'tensorflow' if '--project_name' in sys.argv: project_name_idx = sys.argv.index('--project_name') project_name = sys.argv[project_name_idx + 1] sys.argv.remove('--project_name') sys.argv.pop(project_name_idx) if sys.version_info.major == 3: REQUIRED_PACKAGES.append('wheel >= 0.26') else: REQUIRED_PACKAGES.append('wheel') REQUIRED_PACKAGES.append('mock >= 2.0.0') if 'tf_nightly' in project_name: for package in REQUIRED_PACKAGES: if 'tensorflow-tensorboard' in package: REQUIRED_PACKAGES.remove(package) break if sys.version_info < (3, 4): REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1') CONSOLE_SCRIPTS = [ 'saved_model_cli = tensorflow.python.tools.saved_model_cli:main', 'tensorboard = tensorboard.main:main', ] if 'tf_nightly' in project_name: CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:main') TEST_PACKAGES = [ 'scipy >= 0.15.1', ] class BinaryDistribution(Distribution): def has_ext_modules(self): return True class InstallCommand(InstallCommandBase): def finalize_options(self): ret = InstallCommandBase.finalize_options(self) self.install_headers = os.path.join(self.install_purelib, 'tensorflow', 'include') return ret class InstallHeaders(Command): description = 'install C/C++ header files' user_options = [('install-dir=', 'd', 'directory to install header files to'), ('force', 'f', 'force installation (overwrite existing files)'), ] boolean_options = ['force'] def initialize_options(self): self.install_dir = None self.force = 0 self.outfiles 
= [] def finalize_options(self): self.set_undefined_options('install', ('install_headers', 'install_dir'), ('force', 'force')) def mkdir_and_copy_file(self, header): install_dir = os.path.join(self.install_dir, os.path.dirname(header)) install_dir = re.sub('/google/protobuf_archive/src', '', install_dir) if 'external/eigen_archive/' in install_dir: extra_dir = install_dir.replace('external/eigen_archive', '') if not os.path.exists(extra_dir): self.mkpath(extra_dir) self.copy_file(header, extra_dir) if not os.path.exists(install_dir): self.mkpath(install_dir) return self.copy_file(header, install_dir) def run(self): hdrs = self.distribution.headers if not hdrs: return self.mkpath(self.install_dir) for header in hdrs: (out, _) = self.mkdir_and_copy_file(header) self.outfiles.append(out) def get_inputs(self): return self.distribution.headers or [] def get_outputs(self): return self.outfiles def find_files(pattern, root): for path, _, files in os.walk(root): for filename in fnmatch.filter(files, pattern): yield os.path.join(path, filename) matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x] matches += ['../' + x for x in find_files('*', '_solib_k8') if '.py' not in x] if os.name == 'nt': EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd' else: EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so' headers = (list(find_files('*.h', 'tensorflow/core')) + list(find_files('*.h', 'tensorflow/stream_executor')) + list(find_files('*.h', 'google/protobuf_archive/src')) + list(find_files('*', 'third_party/eigen3')) + list(find_files('*', 'external/eigen_archive')) + list(find_files('*.h', 'external/nsync/public'))) setup( name=project_name, version=_VERSION.replace('-', ''), description='TensorFlow helps the tensors flow', long_description='', url='http://tensorflow.org/', author='Google Inc.', author_email='opensource@google.com', packages=find_packages(), entry_points={ 'console_scripts': CONSOLE_SCRIPTS, }, headers=headers, 
install_requires=REQUIRED_PACKAGES, tests_require=REQUIRED_PACKAGES + TEST_PACKAGES, include_package_data=True, package_data={ 'tensorflow': [ EXTENSION_NAME, ] + matches, }, zip_safe=False, distclass=BinaryDistribution, cmdclass={ 'install_headers': InstallHeaders, 'install': InstallCommand, }, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2.7', 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Software Development :: Libraries', ], license='Apache 2.0', keywords='tensorflow tensor machine learning',)
true
true
1c45eac1149605f449ce85664d9f605d2ddc6d4b
14,968
py
Python
release/python/l2tester/interface.py
gringolito/l2tester
eb8eddad6a9fee33e05e0d8d601229cd704e5464
[ "MIT" ]
8
2018-04-05T12:05:42.000Z
2021-07-01T10:44:29.000Z
release/python/l2tester/interface.py
gringolito/l2tester
eb8eddad6a9fee33e05e0d8d601229cd704e5464
[ "MIT" ]
6
2018-04-05T10:36:31.000Z
2021-08-08T08:06:13.000Z
release/python/l2tester/interface.py
gringolito/l2tester
eb8eddad6a9fee33e05e0d8d601229cd704e5464
[ "MIT" ]
9
2018-04-04T19:15:49.000Z
2021-08-07T10:17:10.000Z
try: from pyroute2 import IPRoute except: raise Exception(""" l2tester.interface depends on the following module: * pyroute2 : available at https://pypi.python.org/pypi/pyroute2 Download .tar.gz, extract it, enter folder and run 'sudo python setup.py install' to install this module. """) import socket import struct import fcntl import ctypes import os import re import logging from select import select # From <linux/if_ether.h> ETH_P_ALL = 0x0003 # From <linux/socket.h> SOL_PACKET = 263 # From <linux/if_packet.h> PACKET_MR_PROMISC = 1 PACKET_ADD_MEMBERSHIP = 0x0001 PACKET_DROP_MEMBERSHIP = 0x0002 ## Ethtool ######################################################################################## # From <linux/ethtool.h> ETHTOOL_GSET = 0x00000001 ETHTOOL_SSET = 0x00000002 # From <linux/sockios.h> SIOCETHTOOL = 0x8946 class Ethtool(): """ Implement ethtool functionality by ioctl with struct ethtool_cmd from <linux/ethtool.h> struct ethtool_cmd { u32 cmd; u32 supported; /* Features this interface supports */ u32 advertising; /* Features this interface advertises */ u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */ u8 duplex; /* Duplex, half or full */ u8 port; /* Which connector port */ u8 phy_address; u8 transceiver; /* Which transceiver to use */ u8 autoneg; /* Enable or disable autonegotiation */ u32 maxtxpkt; /* Tx pkts before generating tx int */ u32 maxrxpkt; /* Rx pkts before generating rx int */ u32 reserved[4]; }; """ st_format = "IIIHBBBBBII16x" def __init__(self, if_name): """ Initialize ethtool. @param if_name Name of interface. """ self.data = ctypes.create_string_buffer(44) # sizeof(struct ethtool_cmd) self.__unpack() self.ifreq_input = struct.pack('16sI12x', if_name, ctypes.addressof(self.data)) self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) def get(self): """ Request ethtool information using ioctl. Update object parameters. 
""" self.cmd = ETHTOOL_GSET self.__pack() fcntl.ioctl(self.sockfd, SIOCETHTOOL, self.ifreq_input) self.__unpack() def set(self): """ Configure ethtool information using ioctl. Must be preceded by a 'get' if not all fields are changed. """ self.cmd = ETHTOOL_SSET self.__pack() fcntl.ioctl(self.sockfd, SIOCETHTOOL, self.ifreq_input) def __unpack(self): """ [private] Extract fields from buffer. """ unpacked = struct.unpack(self.st_format, self.data[:]) self.cmd = unpacked[0] self.supported = unpacked[1] self.advertising = unpacked[2] self.speed = unpacked[3] self.duplex = unpacked[4] self.port = unpacked[5] self.phy_address = unpacked[6] self.transceiver = unpacked[7] self.autoneg = unpacked[8] self.maxtxpkt = unpacked[9] self.maxrxpkt = unpacked[10] def __pack(self): """ Updated buffer with current fields. """ packed = struct.pack(self.st_format, self.cmd, self.supported, self.advertising, self.speed, self.duplex, self.port, self.phy_address, self.transceiver, self.autoneg, self.maxtxpkt, self.maxrxpkt) for i in xrange(44): self.data[i] = packed[i] ## Interface ###################################################################################### class Interface(): """ Define ethernet interface using low level RAW sockets. NOTE: To create RAW sockets, you must be superuser or have 'cap_net_raw' capabilities. You can set the capabilities to python using: $ sudo setcap cap_mac_admin,cap_net_raw,cap_net_admin=eip /usr/bin/python2.6 """ netlink = IPRoute() def __init__(self, name, eth_type=ETH_P_ALL): """ Initialize interface. Open socket and set interface in promiscuous mode. @param name Name of the interface. Ex: 'eth0' @param eth_type Ethernet protocols read by this interface. Default: ALL PROTOCOLS. 
""" self.logger = logging.getLogger("PC eth") self.eth_type = eth_type self.name = name self.added_ips = [] self.is_vlan = False # If the interface is not part of IPDB, it can be a VLAN if not self.netlink.link_lookup(ifname=self.name): vlan_match = re.match( "^(?P<base_interface>eth\d+)\.(?P<vlan_id>[1-9]\d{1,3})$", self.name) if vlan_match is None: raise Exception("Invalid interface name " + self.name) base = vlan_match.group('base_interface') vid = int(vlan_match.group('vlan_id')) base_idx = self.netlink.link_lookup(ifname=base) if not base_idx: raise Exception("Invalid base interface name " + self.name) try: request = { 'index': 0, 'ipaddr': [], 'link': base_idx[0], 'flags': 0, 'ifname': self.name, 'ports': [], 'IFLA_LINKINFO': { 'attrs': [ ['IFLA_INFO_DATA', { 'attrs': [['IFLA_VLAN_ID', vid]] }], ['IFLA_INFO_KIND', 'vlan'] ] } } # Send request to create new interface with VLAN self.netlink.link('add', **request) self.is_vlan = True except: self.logger.critical("Couldn't create interface %s", self.name) raise # Get Interface Index, set to UP, get MTU and MAC Address self.if_index = self.netlink.link_lookup(ifname=self.name)[0] self.netlink.link('set', index=self.if_index, state='up') info = dict(self.netlink.get_links(self.if_index)[0]['attrs']) self.mac_address = info['IFLA_ADDRESS'].upper() self.mtu = info['IFLA_MTU'] # Create socket to receive/send frames: self.sockfd = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(self.eth_type)) self.sockfd.bind((self.name, self.eth_type)) # Enable promiscuous mode: self.set_promiscuous(True) # By default, start using auto-negotiation self.using_forced_speed_duplex = False def __del__(self): """ Destructor. Disable promiscuous mode on interface. """ # Clean added IP addresses. for ip in self.added_ips: self.__set_ip_address(ip[0], ip[1], 'delete') # Remove VLAN if it was created. 
if self.is_vlan: self.netlink.link('delete', index=self.if_index) # Disable promiscuous mode: self.set_promiscuous(False) # Leave interface with auto-negotiation enabled: if self.using_forced_speed_duplex: self.enable_auto_negotiation() def recv(self): """ Receive a packet. If it's an outgoing packet ignore it. """ packet, address = self.sockfd.recvfrom(self.mtu) return packet if address[2] != socket.PACKET_OUTGOING else None def send(self, packet): """ Send a packet through this interface. """ self.sockfd.sendto(str(packet), 0, (self.name, self.eth_type)) def flush(self): """ Remove all packets from read buffer. """ self.sockfd.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0) while True: r, w, e = select([self.sockfd.fileno()], [], [], 0) if r: os.read(self.sockfd.fileno(), self.mtu) else: break self.sockfd.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30) def set_promiscuous(self, enable): """ Enable/Disable promiscuous mode on interface. @param enable True to enable, False to disable. """ cmd = PACKET_ADD_MEMBERSHIP if enable else PACKET_DROP_MEMBERSHIP mreq = struct.pack('IHH8s', self.if_index, PACKET_MR_PROMISC, 0, "") self.sockfd.setsockopt(SOL_PACKET, cmd, mreq) def add_ip_address(self, ip_address): """ Adds an IP address/network mask (the default prefix is 24) @param ip_address The IP address followed optionally by mask size. Ex: 192.168.0.24/24 """ self.__set_ip_address(ip_address, socket.AF_INET, 'add') self.added_ips.append((ip_address, socket.AF_INET)) def del_ip_address(self, ip_address): """ Deletes an IP address/network mask (the default prefix is 24) @param ip_address The IP address followed optionally by mask size. Ex: 192.168.0.24/24 """ self.__set_ip_address(ip_address, socket.AF_INET, 'delete') self.added_ips.remove((ip_address, socket.AF_INET)) def add_ipv6_address(self, ipv6_address): """ Adds an IPv6 address/network mask (the default prefix is 24) @param ip_address The IPv6 address followed optionally by mask size. 
Ex: 56::1/24 """ self.__set_ip_address(ipv6_address, socket.AF_INET6, 'add') self.added_ips.append((ipv6_address, socket.AF_INET6)) def del_ipv6_address(self, ipv6_address): """ Deletes an IPv6 address/network mask (the default prefix is 24) @param ip_address The IPv6 address followed optionally by mask size. Ex: 56::1/24 """ self.__set_ip_address(ipv6_address, socket.AF_INET6, 'delete') self.added_ips.remove((ipv6_address, socket.AF_INET6)) def enable_auto_negotiation(self): """ Enable auto-negotiation for Ethernet link. """ ethtool = Ethtool(self.name) ethtool.get() ethtool.advertising = ethtool.supported ethtool.autoneg = 1 ethtool.set() self.using_forced_speed_duplex = False self.logger.info("[%s] Enabled Auto-negotiation.", self.name) def force_speed_duplex(self, speed, duplex): """ Configure interface speed/duplex disabling auto-negotiation. @param speed Set forced speed. Values: 10, 100, 1000, 2500, 10000. @param duplex Set forced duplex. """ if not speed in [10, 100, 1000, 2500, 10000]: raise ValueError("Speed can only be: 10, 100, 1000, 2500 or 10000 Mbps.") ethtool = Ethtool(self.name) ethtool.get() ethtool.speed = speed ethtool.duplex = 1 if duplex else 0 ethtool.autoneg = 0 ethtool.set() self.using_forced_speed_duplex = True self.logger.info("[%s] Configured forced speed: %d Mbps / %s duplex", self.name, speed, "full" if duplex else "half") def has_ip_address(self, ip_address): """ Returns True if the address is already configured in the interface, and False otherwise @param ip_address The IP address to be checked """ return (self.__check_ip_address(ip_address, socket.AF_INET) or self.__check_ip_address(ip_address, socket.AF_INET6)) def set_mtu(self, mtu): """ Configure interface MTU. @param mtu New value for MTU. 
""" self.netlink.link('set', index=self.if_index, mtu=mtu) def set_mac_address(self, mac_address): """ Configure a new MAC address at this interface @param mac_address The MAC address to be set """ self.netlink.link('set', index=self.if_index, address=mac_address) self.mac_address = mac_address def __check_ip_address(self, ip_address, ip_family): """ Returns True if the address is already configured in the interface, and False otherwise @param ip_address The IP address to be checked @param ip_family socket.AF_INET if ip_address is an IPv4 address; socket.AF_INET6 otherwise """ address_types = ['IFA_ADDRESS', 'IFA_LOCAL', 'IFA_BROADCAST', 'IFA_ANYCAST', 'IFA_MULTICAST'] for interface in self.netlink.get_addr(family=ip_family): if interface['index'] != self.if_index: continue for address in interface['attrs']: if address[0] in address_types and address[1] == ip_address: return True return False def __set_ip_address(self, ip_address, ip_family, action): """ Adds or deletes an IP address/network mask (optional) @param ip_address The IP address followed optionally by mask size. 
Ex: 192.168.0.24/24; 56::1/24 @param ip_family socket.AF_INET to IPv4 addresses; socket.AF_INET6 to IPv6 addresses @param action 'add' or 'del', to add or delete an IP address, respectively """ ip_and_mask = ip_address.split('/') ip_version = 4 if ip_family == socket.AF_INET else 6 network_mask = 24 if len(ip_and_mask) < 2 else int(ip_and_mask[1]) exists = self.__check_ip_address(ip_and_mask[0], ip_family) if (action == 'add' and exists) or (action == 'delete' and not exists): self.logger.info('No need to %s the IP%d address %s/%d from/to %s because it already %sexists', action, ip_version, ip_and_mask[0], network_mask, self.name, '' if exists else 'does not ') return self.logger.info("%s IPv%d address %s/%d in %s", action, ip_version, ip_and_mask[0], network_mask, self.name) self.netlink.addr(action, self.if_index, address=ip_and_mask[0], mask=network_mask, family=ip_family) ## Access PC interfaces ############################################################################ interface_instances = {} def get_interface(if_name): """ Get interface reference. It's used to avoid multiple sockets for the same interface. """ if not if_name in interface_instances: interface_instances[if_name] = Interface(if_name) return interface_instances[if_name] def mac_address(if_name): """ Shortcut to get_interface(if_name).mac_address. """ return get_interface(if_name).mac_address def delete_interfaces(): """ Delete all created interfaces. """ for if_name in interface_instances.keys(): del interface_instances[if_name]
38.183673
120
0.587654
try: from pyroute2 import IPRoute except: raise Exception(""" l2tester.interface depends on the following module: * pyroute2 : available at https://pypi.python.org/pypi/pyroute2 Download .tar.gz, extract it, enter folder and run 'sudo python setup.py install' to install this module. """) import socket import struct import fcntl import ctypes import os import re import logging from select import select ETH_P_ALL = 0x0003 SOL_PACKET = 263 PACKET_MR_PROMISC = 1 PACKET_ADD_MEMBERSHIP = 0x0001 PACKET_DROP_MEMBERSHIP = 0x0002 ICAST'] for interface in self.netlink.get_addr(family=ip_family): if interface['index'] != self.if_index: continue for address in interface['attrs']: if address[0] in address_types and address[1] == ip_address: return True return False def __set_ip_address(self, ip_address, ip_family, action): ip_and_mask = ip_address.split('/') ip_version = 4 if ip_family == socket.AF_INET else 6 network_mask = 24 if len(ip_and_mask) < 2 else int(ip_and_mask[1]) exists = self.__check_ip_address(ip_and_mask[0], ip_family) if (action == 'add' and exists) or (action == 'delete' and not exists): self.logger.info('No need to %s the IP%d address %s/%d from/to %s because it already %sexists', action, ip_version, ip_and_mask[0], network_mask, self.name, '' if exists else 'does not ') return self.logger.info("%s IPv%d address %s/%d in %s", action, ip_version, ip_and_mask[0], network_mask, self.name) self.netlink.addr(action, self.if_index, address=ip_and_mask[0], mask=network_mask, family=ip_family) ## Access PC interfaces ############################################################################ interface_instances = {} def get_interface(if_name): if not if_name in interface_instances: interface_instances[if_name] = Interface(if_name) return interface_instances[if_name] def mac_address(if_name): return get_interface(if_name).mac_address def delete_interfaces(): for if_name in interface_instances.keys(): del interface_instances[if_name]
true
true
1c45eb2407679dda457ec86794fdeaee8e70ab96
90,227
py
Python
src/reportlab/pdfbase/pdfdoc.py
radjkarl/reportlab
48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765
[ "BSD-3-Clause" ]
51
2015-01-20T19:50:34.000Z
2022-03-05T21:23:32.000Z
src/reportlab/pdfbase/pdfdoc.py
radjkarl/reportlab
48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765
[ "BSD-3-Clause" ]
16
2015-11-15T04:23:43.000Z
2021-09-27T14:14:20.000Z
src/reportlab/pdfbase/pdfdoc.py
radjkarl/reportlab
48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765
[ "BSD-3-Clause" ]
46
2015-03-28T10:18:14.000Z
2021-12-16T15:57:47.000Z
#Copyright ReportLab Europe Ltd. 2000-2012 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/pdfdoc.py __version__=''' $Id$ ''' __doc__=""" The module pdfdoc.py handles the 'outer structure' of PDF documents, ensuring that all objects are properly cross-referenced and indexed to the nearest byte. The 'inner structure' - the page descriptions - are presumed to be generated before each page is saved. pdfgen.py calls this and provides a 'canvas' object to handle page marking operators. piddlePDF calls pdfgen and offers a high-level interface. The classes within this generally mirror structures in the PDF file and are not part of any public interface. Instead, canvas and font classes are made available elsewhere for users to manipulate. """ import types, binascii, codecs from collections import OrderedDict from reportlab.pdfbase import pdfutils from reportlab import rl_config from reportlab.lib.utils import import_zlib, open_for_read, makeFileName, isSeq, isBytes, isUnicode, _digester, isStr, bytestr, isPy3, annotateException from reportlab.lib.rl_accel import escapePDF, fp_str, asciiBase85Encode, asciiBase85Decode from reportlab.pdfbase import pdfmetrics from hashlib import md5 from sys import platform from sys import version_info from sys import stderr if platform[:4] == 'java' and version_info[:2] == (2, 1): # workaround for list()-bug in Jython 2.1 (should be fixed in 2.2) def list(sequence): def f(x): return x return list(map(f, sequence)) class PDFError(Exception): pass # __InternalName__ is a special attribute that can only be set by the Document arbitrator __InternalName__ = "__InternalName__" # __RefOnly__ marks reference only elements that must be formatted on top level __RefOnly__ = "__RefOnly__" # __Comment__ provides a (one line) comment to inline with an object ref, if present # if it is more than one line then percentize it... 
__Comment__ = "__Comment__" # name for standard font dictionary BasicFonts = "BasicFonts" # name for the pages object Pages = "Pages" PDF_VERSION_DEFAULT = (1, 3) PDF_SUPPORT_VERSION = dict( #map keyword to min version that supports it transparency = (1, 4), ) if isPy3: def pdfdocEnc(x): return x.encode('extpdfdoc') if isinstance(x,str) else x else: def pdfdocEnc(x): return x.encode('extpdfdoc') if isinstance(x,unicode) else x def format(element, document, toplevel=0): """Indirection step for formatting. Ensures that document parameters alter behaviour of formatting for all elements. """ if isinstance(element,PDFObject): if not toplevel and hasattr(element, __RefOnly__): # the object cannot be a component at non top level. # make a reference to it and return it's format return document.Reference(element).format(document) else: f = element.format(document) if not rl_config.invariant and rl_config.pdfComments and hasattr(element, __Comment__): f = pdfdocEnc("%% %s\r\n" % element.__Comment__)+f return f elif type(element) in (float, int): #use a controlled number formatting routine #instead of str, so Jython/Python etc do not differ return pdfdocEnc(fp_str(element)) elif isBytes(element): return element elif isUnicode(element): return pdfdocEnc(element) else: return pdfdocEnc(str(element)) def xObjectName(externalname): return "FormXob.%s" % externalname # backwards compatibility formName = xObjectName # no encryption class NoEncryption: def encode(self, t): "encode a string, stream, text" return t def prepare(self, document): # get ready to do encryption pass def register(self, objnum, version): # enter a new direct object pass def info(self): # the representation of self in file if any (should be None or PDFDict) return None class PDFObject(object): pass class DummyDoc(PDFObject): "used to bypass encryption when required" encrypt = NoEncryption() ### the global document structure manager class PDFDocument(PDFObject): # set this to define filters defaultStreamFilters 
= None encrypt = NoEncryption() # default no encryption def __init__(self, dummyoutline=0, compression=rl_config.pageCompression, invariant=rl_config.invariant, filename=None, pdfVersion=PDF_VERSION_DEFAULT, ): self._ID = None self.objectcounter = 0 self.shadingCounter = 0 self.inObject = None self.pageCounter = 1 # allow None value to be passed in to mean 'give system defaults' if invariant is None: self.invariant = rl_config.invariant else: self.invariant = invariant self.setCompression(compression) self._pdfVersion = pdfVersion # signature for creating PDF ID sig = self.signature = md5() sig.update(b"a reportlab document") if not self.invariant: cat = _getTimeStamp() else: cat = 946684800.0 cat = ascii(cat) sig.update(bytestr(cat)) # initialize with timestamp digest # mapping of internal identifier ("Page001") to PDF objectnumber and generation number (34, 0) self.idToObjectNumberAndVersion = {} # mapping of internal identifier ("Page001") to PDF object (PDFPage instance) self.idToObject = {} # internal id to file location self.idToOffset = {} # number to id self.numberToId = {} cat = self.Catalog = self._catalog = PDFCatalog() pages = self.Pages = PDFPages() cat.Pages = pages if dummyoutline: outlines = PDFOutlines0() else: outlines = PDFOutlines() self.Outlines = self.outline = outlines cat.Outlines = outlines self.info = PDFInfo() self.info.invariant = self.invariant #self.Reference(self.Catalog) #self.Reference(self.Info) self.fontMapping = {} #make an empty font dictionary DD = PDFDictionary({}) DD.__Comment__ = "The standard fonts dictionary" self.Reference(DD, BasicFonts) self.delayedFonts = [] def setCompression(self, onoff): # XXX: maybe this should also set self.defaultStreamFilters? 
self.compression = onoff def ensureMinPdfVersion(self, *keys): "Ensure that the pdf version is greater than or equal to that specified by the keys" for k in keys: self._pdfVersion = max(self._pdfVersion, PDF_SUPPORT_VERSION[k]) def updateSignature(self, thing): "add information to the signature" if self._ID: return # but not if its used already! self.signature.update(bytestr(thing)) def ID(self): "A unique fingerprint for the file (unless in invariant mode)" if self._ID: return self._ID digest = self.signature.digest() doc = DummyDoc() IDs = PDFString(digest,enc='raw').format(doc) self._ID = (b'\r\n % ReportLab generated PDF document -- digest (http://www.reportlab.com)\r\n [' +IDs+b' '+IDs+b']\r\n') return self._ID def SaveToFile(self, filename, canvas): if hasattr(getattr(filename, "write",None),'__call__'): myfile = 0 f = filename filename = makeFileName(getattr(filename,'name','')) else : myfile = 1 filename = makeFileName(filename) f = open(filename, "wb") data = self.GetPDFData(canvas) if isUnicode(data): data = data.encode('latin1') f.write(data) if myfile: f.close() import os if os.name=='mac': from reportlab.lib.utils import markfilename markfilename(filename) # do platform specific file junk if getattr(canvas,'_verbosity',None): print('saved %s' % (filename,)) def GetPDFData(self, canvas): # realize delayed fonts for fnt in self.delayedFonts: fnt.addObjects(self) # add info stuff to signature self.info.invariant = self.invariant self.info.digest(self.signature) ### later: maybe add more info to sig? 
# prepare outline self.Reference(self.Catalog) self.Reference(self.info) outline = self.outline outline.prepare(self, canvas) return self.format() def inPage(self): """specify the current object as a page (enables reference binding and other page features)""" if self.inObject is not None: if self.inObject=="page": return raise ValueError("can't go in page already in object %s" % self.inObject) self.inObject = "page" def inForm(self): """specify that we are in a form xobject (disable page features, etc)""" # don't need this check anymore since going in a form pushes old context at canvas level. #if self.inObject not in ["form", None]: # raise ValueError("can't go in form already in object %s" % self.inObject) self.inObject = "form" # don't need to do anything else, I think... def getInternalFontName(self, psfontname): fm = self.fontMapping if psfontname in fm: return fm[psfontname] else: try: # does pdfmetrics know about it? if so, add fontObj = pdfmetrics.getFont(psfontname) if fontObj._dynamicFont: raise PDFError("getInternalFontName(%s) called for a dynamic font" % repr(psfontname)) fontObj.addObjects(self) return fm[psfontname] except KeyError: raise PDFError("Font %s not known!" 
% repr(psfontname)) def thisPageName(self): return "Page"+repr(self.pageCounter) def thisPageRef(self): return PDFObjectReference(self.thisPageName()) def addPage(self, page): name = self.thisPageName() self.Reference(page, name) self.Pages.addPage(page) self.pageCounter += 1 self.inObject = None def addForm(self, name, form): """add a Form XObject.""" # XXX should check that name is a legal PDF name if self.inObject != "form": self.inForm() self.Reference(form, xObjectName(name)) self.inObject = None def annotationName(self, externalname): return "Annot.%s"%externalname def addAnnotation(self, name, annotation): self.Reference(annotation, self.annotationName(name)) def refAnnotation(self, name): internalname = self.annotationName(name) return PDFObjectReference(internalname) def addShading(self, shading): name = "Sh%d" % self.shadingCounter self.Reference(shading, name) self.shadingCounter += 1 return name def addColor(self,cmyk): sname = cmyk.spotName if not sname: if cmyk.cyan==0 and cmyk.magenta==0 and cmyk.yellow==0: sname = 'BLACK' elif cmyk.black==0 and cmyk.magenta==0 and cmyk.yellow==0: sname = 'CYAN' elif cmyk.cyan==0 and cmyk.black==0 and cmyk.yellow==0: sname = 'MAGENTA' elif cmyk.cyan==0 and cmyk.magenta==0 and cmyk.black==0: sname = 'YELLOW' if not sname: raise ValueError("CMYK colour %r used without a spotName" % cmyk) else: cmyk = cmyk.clone(spotName = sname) name = PDFName(sname)[1:] if name not in self.idToObject: sep = PDFSeparationCMYKColor(cmyk).value() #PDFArray([/Separation /name /DeviceCMYK tint_tf]) self.Reference(sep,name) return name,sname def setTitle(self, title): "embeds in PDF file" if title is None: self.info.title = '(anonymous)' else: self.info.title = title def setAuthor(self, author): "embedded in PDF file" #allow resetting to clear it if author is None: self.info.author = '(anonymous)' else: self.info.author = author def setSubject(self, subject): "embeds in PDF file" #allow resetting to clear it if subject is None: 
self.info.subject = '(unspecified)' else: self.info.subject = subject def setCreator(self, creator): "embeds in PDF file" #allow resetting to clear it if creator is None: self.info.creator = '(unspecified)' else: self.info.creator = creator def setKeywords(self, keywords): "embeds a string containing keywords in PDF file" #allow resetting to clear it but ensure it's a string if keywords is None: self.info.keywords = '' else: self.info.keywords = keywords def setDateFormatter(self, dateFormatter): self.info._dateFormatter = dateFormatter def getAvailableFonts(self): fontnames = list(self.fontMapping.keys()) # the standard 14 are also always available! (even if not initialized yet) from reportlab.pdfbase import _fontdata for name in _fontdata.standardFonts: if name not in fontnames: fontnames.append(name) fontnames.sort() return fontnames def format(self): # register the Catalog/INfo and then format the objects one by one until exhausted # (possible infinite loop if there is a bug that continually makes new objects/refs...) # Prepare encryption self.encrypt.prepare(self) cat = self.Catalog info = self.info self.Reference(self.Catalog) self.Reference(self.info) # register the encryption dictionary if present encryptref = None encryptinfo = self.encrypt.info() if encryptinfo: encryptref = self.Reference(encryptinfo) # make std fonts (this could be made optional counter = 0 # start at first object (object 1 after preincrement) ids = [] # the collection of object ids in object number order numbertoid = self.numberToId idToNV = self.idToObjectNumberAndVersion idToOb = self.idToObject idToOf = self.idToOffset ### note that new entries may be "appended" DURING FORMATTING done = None # __accum__ allows objects to know where they are in the file etc etc self.__accum__ = File = PDFFile(self._pdfVersion) # output collector while done is None: counter += 1 # do next object... 
if counter in numbertoid: id = numbertoid[counter] #printidToOb obj = idToOb[id] IO = PDFIndirectObject(id, obj) # register object number and version #encrypt.register(id, IOf = IO.format(self) # add a comment to the PDF output if not rl_config.invariant and rl_config.pdfComments: try: classname = obj.__class__.__name__ except: classname = ascii(obj) File.add("%% %s: class %s \r\n" % (ascii(id), classname[:50])) offset = File.add(IOf) idToOf[id] = offset ids.append(id) else: done = 1 del self.__accum__ # sanity checks (must happen AFTER formatting) lno = len(numbertoid) if counter-1!=lno: raise ValueError("counter %s doesn't match number to id dictionary %s" %(counter, lno)) # now add the xref xref = PDFCrossReferenceTable() xref.addsection(0, ids) xreff = xref.format(self) xrefoffset = File.add(xreff) # now add the trailer trailer = PDFTrailer( startxref = xrefoffset, Size = lno+1, Root = self.Reference(cat), Info = self.Reference(info), Encrypt = encryptref, ID = self.ID(), ) trailerf = trailer.format(self) File.add(trailerf) for ds in getattr(self,'_digiSigs',[]): ds.sign(File) # return string format for pdf file return File.format(self) def hasForm(self, name): """test for existence of named form""" internalname = xObjectName(name) return internalname in self.idToObject def getFormBBox(self, name, boxType="MediaBox"): """get the declared bounding box of the form as a list. If you specify a different PDF box definition (e.g. 
the ArtBox) and it has one, that's what you'll get.""" internalname = xObjectName(name) if internalname in self.idToObject: theform = self.idToObject[internalname] if hasattr(theform,'_extra_pageCatcher_info'): return theform._extra_pageCatcher_info[boxType] if isinstance(theform, PDFFormXObject): # internally defined form return theform.BBoxList() elif isinstance(theform, PDFStream): # externally defined form return list(theform.dictionary.dict[boxType].sequence) else: raise ValueError("I don't understand the form instance %s" % repr(name)) def getXObjectName(self, name): """Lets canvas find out what form is called internally. Never mind whether it is defined yet or not.""" return xObjectName(name) def xobjDict(self, formnames): """construct an xobject dict (for inclusion in a resource dict, usually) from a list of form names (images not yet supported)""" D = {} for name in formnames: internalname = xObjectName(name) reference = PDFObjectReference(internalname) D[internalname] = reference #print "xobjDict D", D return PDFDictionary(D) def Reference(self, obj, name=None): ### note references may "grow" during the final formatting pass: don't use d.keys()! # don't make references to other references, or non instances, unless they are named! 
iob = isinstance(obj,PDFObject) idToObject = self.idToObject if name is None and (not iob or obj.__class__ is PDFObjectReference): return obj if hasattr(obj, __InternalName__): # already registered intname = obj.__InternalName__ if name is not None and name!=intname: raise ValueError("attempt to reregister object %s with new name %s" % ( repr(intname), repr(name))) if intname not in idToObject: raise ValueError("object of type %s named as %s, but not registered" % (type(obj),ascii(intname))) return PDFObjectReference(intname) # otherwise register the new object objectcounter = self.objectcounter = self.objectcounter+1 if name is None: name = "R"+repr(objectcounter) if name in idToObject: other = idToObject[name] if other!=obj: raise ValueError("redefining named object: "+repr(name)) return PDFObjectReference(name) if iob: obj.__InternalName__ = name #print "name", name, "counter", objectcounter self.idToObjectNumberAndVersion[name] = (objectcounter, 0) self.numberToId[objectcounter] = name idToObject[name] = obj return PDFObjectReference(name) ### chapter 4 Objects PDFtrue = "true" PDFfalse = "false" PDFnull = "null" class PDFText(PDFObject): def __init__(self, t): self.t = t def format(self, document): t = self.t if isUnicode(t): t = t.encode('utf-8') result = binascii.hexlify(document.encrypt.encode(t)) return b"<" + result + b">" def __str__(self): dummydoc = DummyDoc() return self.format(dummydoc) def PDFnumber(n): return n import re _re_cleanparens=re.compile('[^()]') del re def _isbalanced(s): '''test whether a string is balanced in parens''' s = _re_cleanparens.sub('',s) n = 0 for c in s: if c=='(': n+=1 else: n -= 1 if n<0: return 0 return not n and 1 or 0 def _checkPdfdoc(utext): '''return true if no Pdfdoc encoding errors''' try: utext.encode('pdfdoc') return 1 except UnicodeEncodeError as e: return 0 class PDFString(PDFObject): def __init__(self, s, escape=1, enc='auto'): '''s can be unicode/utf8 or a PDFString if escape is true then the output will be 
passed through escape if enc is raw then the string will be left alone if enc is auto we'll try and automatically adapt to utf_16_be if the effective string is not entirely in pdfdoc ''' if isinstance(s,PDFString): self.s = s.s self.escape = s.escape self.enc = s.enc else: self.s = s self.escape = escape self.enc = enc def format(self, document): s = self.s enc = getattr(self,'enc','auto') if (isBytes(s)): if enc is 'auto': try: u = s.decode(s.startswith(codecs.BOM_UTF16_BE) and 'utf16' or 'utf8') if _checkPdfdoc(u): s = u.encode('pdfdoc') else: s = codecs.BOM_UTF16_BE+u.encode('utf_16_be') except: try: s.decode('pdfdoc') except: stderr.write('Error in %s' % (repr(s),)) raise elif isUnicode(s): if enc is 'auto': if _checkPdfdoc(s): s = s.encode('pdfdoc') else: s = codecs.BOM_UTF16_BE+s.encode('utf_16_be') else: s = codecs.BOM_UTF16_BE+s.encode('utf_16_be') else: raise ValueError('PDFString argument must be str/unicode not %s' % type(s)) escape = getattr(self,'escape',1) if not isinstance(document.encrypt,NoEncryption): s = document.encrypt.encode(s) escape = 1 if escape: try: es = "(%s)" % escapePDF(s) except: raise ValueError("cannot escape %s %s" % (s, repr(s))) if escape&2: es = es.replace('\\012','\n') if escape&4 and _isbalanced(es): es = es.replace('\\(','(').replace('\\)',')') return pdfdocEnc(es) else: return b'(' + s + b')' def __str__(self): return "(%s)" % escapePDF(self.s) def PDFName(data,lo=chr(0x21),hi=chr(0x7e)): # might need to change this to class for encryption # NOTE: RESULT MUST ALWAYS SUPPORT MEANINGFUL COMPARISONS (EQUALITY) AND HASH # first convert the name L = list(data) for i,c in enumerate(L): if c<lo or c>hi or c in "%()<>{}[]#": L[i] = "#"+hex(ord(c))[2:] # forget the 0x thing... 
return "/"+(''.join(L)) class PDFDictionary(PDFObject): multiline = True def __init__(self, dict=None): """dict should be namestring to value eg "a": 122 NOT pdfname to value NOT "/a":122""" if dict is None: self.dict = {} else: self.dict = dict.copy() def __setitem__(self, name, value): self.dict[name] = value def __getitem__(self, a): return self.dict[a] def __contains__(self,a): return a in self.dict def Reference(self, name, document): self.dict[name] = document.Reference(self.dict[name]) def format(self, document,IND=b'\r\n '): dict = self.dict try: keys = list(dict.keys()) except: print(ascii(dict)) raise if not isinstance(dict,OrderedDict): keys.sort() L = [(format(PDFName(k),document)+b" "+format(dict[k],document)) for k in keys] if self.multiline and rl_config.pdfMultiLine: L = IND.join(L) else: # break up every 6 elements anyway t=L.insert for i in reversed(range(6, len(L), 6)): t(i,b'\r\n ') L = b" ".join(L) return b'<< '+L+b' >>' def copy(self): return PDFDictionary(self.dict) def normalize(self): #normalize the names to use RL standard ie Name not /Name D = self.dict K = [k for k in D.keys() if k.startswith('/')] for k in K: D[k[1:]] = D.pop(k) class checkPDFNames: def __init__(self,*names): self.names = list(map(PDFName,names)) def __call__(self,value): if not value.startswith('/'): value=PDFName(value) if value in self.names: return value def checkPDFBoolean(value): if value in ('true','false'): return value class CheckedPDFDictionary(PDFDictionary): validate = {} def __init__(self,dict=None,validate=None): PDFDictionary.__init__(self,dict) if validate: self.validate = validate def __setitem__(self,name,value): if name not in self.validate: raise ValueError('invalid key, %r' % name) cvalue = self.validate[name](value) if cvalue is None: raise ValueError('Bad value %r for key %r' % (value,name)) PDFDictionary.__setitem__(self,name,cvalue) class ViewerPreferencesPDFDictionary(CheckedPDFDictionary): validate=dict( HideToolbar=checkPDFBoolean, 
HideMenubar=checkPDFBoolean, HideWindowUI=checkPDFBoolean, FitWindow=checkPDFBoolean, CenterWindow=checkPDFBoolean, DisplayDocTitle=checkPDFBoolean, #contributed by mark Erbaugh NonFullScreenPageMode=checkPDFNames(*'UseNone UseOutlines UseThumbs UseOC'.split()), Direction=checkPDFNames(*'L2R R2L'.split()), ViewArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()), ViewClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()), PrintArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()), PrintClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()), PrintScaling=checkPDFNames(*'None AppDefault'.split()), ) # stream filters are objects to support round trip and # possibly in the future also support parameters class PDFStreamFilterZCompress: pdfname = "FlateDecode" def encode(self, text): from reportlab.lib.utils import import_zlib zlib = import_zlib() if not zlib: raise ImportError("cannot z-compress zlib unavailable") if isUnicode(text): text = text.encode('utf8') return zlib.compress(text) def decode(self, encoded): from reportlab.lib.utils import import_zlib zlib = import_zlib() if not zlib: raise ImportError("cannot z-decompress zlib unavailable") return zlib.decompress(encoded) # need only one of these, unless we implement parameters later PDFZCompress = PDFStreamFilterZCompress() class PDFStreamFilterBase85Encode: pdfname = "ASCII85Decode" def encode(self, text): from reportlab.pdfbase.pdfutils import _wrap text = asciiBase85Encode(text) if rl_config.wrapA85: text = _wrap(text) return text def decode(self, text): return asciiBase85Decode(text) # need only one of these too PDFBase85Encode = PDFStreamFilterBase85Encode() class PDFStream(PDFObject): '''set dictionary elements explicitly stream.dictionary[name]=value''' ### compression stuff not implemented yet __RefOnly__ = 1 # must be at top level def __init__(self, dictionary=None, content=None, filters=None): if dictionary is None: dictionary = 
PDFDictionary() self.dictionary = dictionary self.content = content self.filters = filters def format(self, document): dictionary = self.dictionary # copy it for modification dictionary = PDFDictionary(dictionary.dict.copy()) content = self.content filters = self.filters if self.content is None: raise ValueError("stream content not set") if filters is None: filters = document.defaultStreamFilters # only apply filters if they haven't been applied elsewhere if filters is not None and "Filter" not in dictionary.dict: # apply filters in reverse order listed rf = list(filters) rf.reverse() fnames = [] for f in rf: #print "*****************content:"; print repr(content[:200]) #print "*****************filter", f.pdfname content = f.encode(content) fnames.insert(0, PDFName(f.pdfname)) #print "*****************finally:"; print content[:200] #print "****** FILTERS", fnames #stop dictionary["Filter"] = PDFArray(fnames) # "stream encoding is done after all filters have been applied" content = document.encrypt.encode(content) fc = format(content, document) dictionary["Length"] = len(content) fd = format(dictionary, document) return fd+b'\r\nstream\r\n'+fc+b'endstream\r\n' def teststream(content=None): #content = "" # test if content is None: content = teststreamcontent content = content.strip() content = content.replace("\n", '\n\r') + '\n\r' S = PDFStream(content = content, filters=rl_config.useA85 and [PDFBase85Encode,PDFZCompress] or [PDFZCompress]) # nothing else needed... 
S.__Comment__ = "test stream" return S teststreamcontent = """ 1 0 0 1 0 0 cm BT /F9 12 Tf 14.4 TL ET 1.00 0.00 1.00 rg n 72.00 72.00 432.00 648.00 re B* """ class PDFArray(PDFObject): multiline = True def __init__(self, sequence): self.sequence = list(sequence) def References(self, document): """make all objects in sequence references""" self.sequence = list(map(document.Reference, self.sequence)) def format(self, document, IND=b'\r\n '): L = [format(e, document) for e in self.sequence] if self.multiline and rl_config.pdfMultiLine: L = IND.join(L) else: n=len(L) if n>10: # break up every 10 elements anyway t=L.insert for i in reversed(range(10, n, 10)): t(i,b'\r\n ') L = b' '.join(L) else: L = b' '.join(L) return b'[ ' + L + b' ]' class PDFArrayCompact(PDFArray): multiline=False class PDFIndirectObject(PDFObject): __RefOnly__ = 1 def __init__(self, name, content): self.name = name self.content = content def format(self, document): name = self.name n, v = document.idToObjectNumberAndVersion[name] # set encryption parameters document.encrypt.register(n, v) fcontent = format(self.content, document, toplevel=1) # yes this is at top level return (pdfdocEnc("%s %s obj\r\n"%(n,v)) +fcontent+ (b'' if fcontent.endswith(b'\r\n') else b'\r\n') +b'endobj\r\n') class PDFObjectReference(PDFObject): def __init__(self, name): self.name = name def format(self, document): try: return pdfdocEnc("%s %s R" % document.idToObjectNumberAndVersion[self.name]) except: raise KeyError("forward reference to %s not resolved upon final formatting" % repr(self.name)) class PDFFile(PDFObject): ### just accumulates strings: keeps track of current offset def __init__(self,pdfVersion=PDF_VERSION_DEFAULT): self.strings = [] self.write = self.strings.append self.offset = 0 ### chapter 5 # Following Ken Lunde's advice and the PDF spec, this includes # some high-order bytes. 
I chose the characters for Tokyo # in Shift-JIS encoding, as these cannot be mistaken for # any other encoding, and we'll be able to tell if something # has run our PDF files through a dodgy Unicode conversion. self.add((pdfdocEnc("%%PDF-%s.%s" % pdfVersion) + b'\r\n%\223\214\213\236 ReportLab Generated PDF document http://www.reportlab.com\r\n' )) def closeOrReset(self): pass def add(self, s): """should be constructed as late as possible, return position where placed""" s = pdfdocEnc(s) result = self.offset self.offset = result+len(s) self.write(s) return result def format(self, document): return b''.join(self.strings) XREFFMT = '%0.10d %0.5d n' class PDFCrossReferenceSubsection(PDFObject): def __init__(self, firstentrynumber, idsequence): self.firstentrynumber = firstentrynumber self.idsequence = idsequence def format(self, document): """id sequence should represent contiguous object nums else error. free numbers not supported (yet)""" firstentrynumber = self.firstentrynumber idsequence = self.idsequence entries = list(idsequence) nentries = len(idsequence) # special case: object number 0 is always free taken = {} if firstentrynumber==0: taken[0] = "standard free entry" nentries = nentries+1 entries.insert(0, "0000000000 65535 f") idToNV = document.idToObjectNumberAndVersion idToOffset = document.idToOffset lastentrynumber = firstentrynumber+nentries-1 for id in idsequence: (num, version) = idToNV[id] if num in taken: raise ValueError("object number collision %s %s %s" % (num, repr(id), repr(taken[id]))) if num>lastentrynumber or num<firstentrynumber: raise ValueError("object number %s not in range %s..%s" % (num, firstentrynumber, lastentrynumber)) # compute position in list rnum = num-firstentrynumber taken[num] = id offset = idToOffset[id] entries[num] = XREFFMT % (offset, version) # now add the initial line firstline = "%s %s" % (firstentrynumber, nentries) entries.insert(0, firstline) # make sure it ends with \r\n entries.append("") return 
pdfdocEnc('\r\n'.join(entries)) class PDFCrossReferenceTable(PDFObject): def __init__(self): self.sections = [] def addsection(self, firstentry, ids): section = PDFCrossReferenceSubsection(firstentry, ids) self.sections.append(section) def format(self, document): sections = self.sections if not sections: raise ValueError("no crossref sections") L = [b"xref\r\n"] for s in self.sections: fs = format(s, document) L.append(fs) return pdfdocEnc(b''.join(L)) class PDFTrailer(PDFObject): def __init__(self, startxref, Size=None, Prev=None, Root=None, Info=None, ID=None, Encrypt=None): self.startxref = startxref if Size is None or Root is None: raise ValueError("Size and Root keys required") dict = self.dict = PDFDictionary() for (n,v) in [("Size", Size), ("Prev", Prev), ("Root", Root), ("Info", Info), ("ID", ID), ("Encrypt", Encrypt)]: if v is not None: dict[n] = v def format(self, document): fdict = format(self.dict, document) return b''.join([ b'trailer\r\n', fdict, b'\r\nstartxref\r\n', pdfdocEnc(str(self.startxref)), b'\r\n%%EOF\r\n', ] ) #### XXXX skipping incremental update, #### encryption #### chapter 6, doc structure class PDFCatalog(PDFObject): __Comment__ = "Document Root" __RefOnly__ = 1 # to override, set as attributes __Defaults__ = {"Type": PDFName("Catalog"), "PageMode": PDFName("UseNone"), "Lang": None, } __NoDefault__ = """ Dests Outlines Pages Threads AcroForm Names OpenAction PageMode URI ViewerPreferences PageLabels PageLayout JavaScript StructTreeRoot SpiderInfo""".split() __Refs__ = __NoDefault__ # make these all into references, if present def format(self, document): self.check_format(document) defaults = self.__Defaults__ Refs = self.__Refs__ D = {} for k,v in defaults.items(): v = getattr(self,k,v) if v is not None: D[k] = v for k in self.__NoDefault__: v = getattr(self,k,None) if v is not None: D[k] = v # force objects to be references where required for k in Refs: if k in D: #print"k is", k, "value", D[k] D[k] = document.Reference(D[k]) dict = 
PDFDictionary(D) return format(dict, document) def showOutline(self): self.setPageMode("UseOutlines") def showFullScreen(self): self.setPageMode("FullScreen") def setPageLayout(self,layout): if layout: self.PageLayout = PDFName(layout) def setPageMode(self,mode): if mode: self.PageMode = PDFName(mode) def check_format(self, document): """for use in subclasses""" pass class PDFPages(PDFCatalog): """PAGES TREE WITH ONE INTERNAL NODE, FOR "BALANCING" CHANGE IMPLEMENTATION""" __Comment__ = "page tree" __RefOnly__ = 1 # note: could implement page attribute inheritance... __Defaults__ = {"Type": PDFName("Pages"), } __NoDefault__ = "Kids Count Parent".split() __Refs__ = ["Parent"] def __init__(self): self.pages = [] def __getitem__(self, item): return self.pages[item] def addPage(self, page): self.pages.append(page) def check_format(self, document): # convert all pages to page references pages = self.pages kids = PDFArray(pages) # make sure all pages are references kids.References(document) self.Kids = kids self.Count = len(pages) class PDFPage(PDFCatalog): __Comment__ = "Page dictionary" # all PDF attributes can be set explicitly # if this flag is set, the "usual" behavior will be suppressed Override_default_compilation = 0 __RefOnly__ = 1 __Defaults__ = {"Type": PDFName("Page"), # "Parent": PDFObjectReference(Pages), # no! use document.Pages } __NoDefault__ = """Parent MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ Trans""".split() __Refs__ = """Contents Parent ID""".split() pagewidth = 595 pageheight = 842 stream = None hasImages = 0 compression = 0 XObjects = None _colorsUsed = {} _shadingsUsed = {} Trans = None # transitionstring? # xobjects? 
# annotations def __init__(self): # set all nodefaults to None for name in self.__NoDefault__: setattr(self, name, None) def setCompression(self, onoff): self.compression = onoff def setStream(self, code): if self.Override_default_compilation: raise ValueError("overridden! must set stream explicitly") if isSeq(code): code = '\r\n'.join(code)+'\r\n' self.stream = code def setPageTransition(self, tranDict): self.Trans = PDFDictionary(tranDict) def check_format(self, document): # set up parameters unless usual behaviour is suppressed if self.Override_default_compilation: return self.MediaBox = self.MediaBox or PDFArray(self.Rotate in (90,270) and [0,0,self.pageheight,self.pagewidth] or [0, 0, self.pagewidth, self.pageheight]) if not self.Annots: self.Annots = None else: #print self.Annots #raise ValueError("annotations not reimplemented yet") if not isinstance(self.Annots,PDFObject): self.Annots = PDFArray(self.Annots) if not self.Contents: stream = self.stream if not stream: self.Contents = teststream() else: S = PDFStream() if self.compression: S.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress] S.content = stream S.__Comment__ = "page stream" self.Contents = S if not self.Resources: resources = PDFResourceDictionary() # fonts! 
resources.basicFonts() if self.hasImages: resources.allProcs() else: resources.basicProcs() if self.XObjects: #print "XObjects", self.XObjects.dict resources.XObject = self.XObjects if self.ExtGState: resources.ExtGState = self.ExtGState resources.setShading(self._shadingUsed) resources.setColorSpace(self._colorsUsed) self.Resources = resources if not self.Parent: pages = document.Pages self.Parent = document.Reference(pages) #this code contributed by Christian Jacobs <cljacobsen@gmail.com> class DuplicatePageLabelPage(Exception): pass class PDFPageLabels(PDFCatalog): __comment__ = None __RefOnly__ = 0 __Defaults__ = {} __NoDefault__ = ["Nums"] __Refs__ = [] def __init__(self): self.labels = [] def addPageLabel(self, page, label): """ Adds a new PDFPageLabel to this catalog. The 'page' argument, an integer, is the page number in the PDF document with which the 'label' should be associated. Page numbering in the PDF starts at zero! Thus, to change the label on the first page, '0' should be provided as an argument, and to change the 6th page, '5' should be provided as the argument. The 'label' argument should be a PDFPageLabel instance, which describes the format of the labels starting on page 'page' in the PDF and continuing until the next encounter of a PDFPageLabel. The order in which labels are added is not important. """ self.labels.append((page, label)) def format(self, document): try: self.labels.sort() except DuplicatePageLabelPage: tmp = sorted([x[0] for x in self.labels]) annotateException('\n\n!!!!! 
Duplicate PageLabel seen for pages %r' % list(set([x for x in tmp if tmp.count(x)>1]))) labels = [] for page, label in self.labels: labels.append(page) labels.append(label) self.Nums = PDFArray(labels) #PDFArray makes a copy with list() return PDFCatalog.format(self, document) class PDFPageLabel(PDFCatalog): __Comment__ = None __RefOnly__ = 0 __Defaults__ = {} __NoDefault__ = "Type S P St".split() __convertible__ = 'ARABIC ROMAN_UPPER ROMAN_LOWER LETTERS_UPPER LETTERS_LOWER' ARABIC = 'D' ROMAN_UPPER = 'R' ROMAN_LOWER = 'r' LETTERS_UPPER = 'A' LETTERS_LOWER = 'a' def __init__(self, style=None, start=None, prefix=None): """ A PDFPageLabel changes the style of page numbering as displayed in a PDF viewer. PDF page labels have nothing to do with 'physical' page numbers printed on a canvas, but instead influence the 'logical' page numbers displayed by PDF viewers. However, when using roman numerals (i, ii, iii...) or page prefixes for appendecies (A.1, A.2...) on the physical pages PDF page labels are necessary to change the logical page numbers displayed by the PDF viewer to match up with the physical numbers. A PDFPageLabel changes the properties of numbering at the page on which it appears (see the class 'PDFPageLabels' for specifying where a PDFPageLabel is associated) and all subsequent pages, until a new PDFPageLabel is encountered. The arguments to this initialiser determine the properties of all subsequent page labels. 'style' determines the numberings style, arabic, roman, letters; 'start' specifies the starting number; and 'prefix' any prefix to be applied to the page numbers. All these arguments can be left out or set to None. * style: - None: No numbering, can be used to display the prefix only. - PDFPageLabel.ARABIC: Use arabic numbers: 1, 2, 3, 4... - PDFPageLabel.ROMAN_UPPER: Use upper case roman numerals: I, II, III... - PDFPageLabel.ROMAN_LOWER: Use lower case roman numerals: i, ii, iii... 
- PDFPageLabel.LETTERS_UPPER: Use upper case letters: A, B, C, D... - PDFPageLabel.LETTERS_LOWER: Use lower case letters: a, b, c, d... * start: - An integer specifying the starting number for this PDFPageLabel. This can be used when numbering style changes to reset the page number back to one, ie from roman to arabic, or from arabic to appendecies. Can be any positive integer or None. I'm not sure what the effect of specifying None is, probably that page numbering continues with the current sequence, I'd have to check the spec to clarify though. * prefix: - A string which is prefixed to the page numbers. Can be used to display appendecies in the format: A.1, A.2, ..., B.1, B.2, ... where a PDFPageLabel is used to set the properties for the first page of each appendix to restart the page numbering at one and set the prefix to the appropriate letter for current appendix. The prefix can also be used to display text only, if the 'style' is set to None. This can be used to display strings such as 'Front', 'Back', or 'Cover' for the covers on books. """ if style: if style.upper() in self.__convertible__: style = getattr(self,style.upper()) self.S = PDFName(style) if start: self.St = PDFnumber(start) if prefix: self.P = PDFString(prefix) def __lt__(self,oth): if rl_config.errorOnDuplicatePageLabelPage: raise DuplicatePageLabelPage() return False #ends code contributed by Christian Jacobs <cljacobsen@gmail.com> def testpage(document): P = PDFPage() P.Contents = teststream() pages = document.Pages P.Parent = document.Reference(pages) P.MediaBox = PDFArray([0, 0, 595, 841]) resources = PDFResourceDictionary() resources.allProcs() # enable all procsets resources.basicFonts() P.Resources = resources pages.addPage(P) #### DUMMY OUTLINES IMPLEMENTATION FOR testing DUMMYOUTLINE = """ << /Count 0 /Type /Outlines >>""" class PDFOutlines0(PDFObject): __Comment__ = "TEST OUTLINE!" 
text = DUMMYOUTLINE.replace("\n", '\r\n') __RefOnly__ = 1 def format(self, document): return pdfdocEnc(self.text) class OutlineEntryObject(PDFObject): "an entry in an outline" Title = Dest = Parent = Prev = Next = First = Last = Count = None def format(self, document): D = {} D["Title"] = PDFString(self.Title) D["Parent"] = self.Parent D["Dest"] = self.Dest for n in ("Prev", "Next", "First", "Last", "Count"): v = getattr(self, n) if v is not None: D[n] = v PD = PDFDictionary(D) return PD.format(document) class PDFOutlines(PDFObject): """ takes a recursive list of outline destinations like:: out = PDFOutline1() out.setNames(canvas, # requires canvas for name resolution "chapter1dest", ("chapter2dest", ["chapter2section1dest", "chapter2section2dest", "chapter2conclusiondest"] ), # end of chapter2 description "chapter3dest", ("chapter4dest", ["c4s1", "c4s2"]) ) Higher layers may build this structure incrementally. KISS at base level. """ # first attempt, many possible features missing. #no init for now mydestinations = ready = None counter = 0 currentlevel = -1 # ie, no levels yet def __init__(self): self.destinationnamestotitles = {} self.destinationstotitles = {} self.levelstack = [] self.buildtree = [] self.closedict = {} # dictionary of "closed" destinations in the outline def addOutlineEntry(self, destinationname, level=0, title=None, closed=None): """destinationname of None means "close the tree" """ if destinationname is None and level!=0: raise ValueError("close tree must have level of 0") if not isinstance(level,int): raise ValueError("level must be integer, got %s" % type(level)) if level<0: raise ValueError("negative levels not allowed") if title is None: title = destinationname currentlevel = self.currentlevel stack = self.levelstack tree = self.buildtree # adjust currentlevel and stack to match level if level>currentlevel: if level>currentlevel+1: raise ValueError("can't jump from outline level %s to level %s, need intermediates (destinationname=%r, 
title=%r)" %(currentlevel, level, destinationname, title)) level = currentlevel = currentlevel+1 stack.append([]) while level<currentlevel: # pop off levels to match current = stack[-1] del stack[-1] previous = stack[-1] lastinprevious = previous[-1] if isinstance(lastinprevious,tuple): (name, sectionlist) = lastinprevious raise ValueError("cannot reset existing sections: " + repr(lastinprevious)) else: name = lastinprevious sectionlist = current previous[-1] = (name, sectionlist) #sectionlist.append(current) currentlevel = currentlevel-1 if destinationname is None: return stack[-1].append(destinationname) self.destinationnamestotitles[destinationname] = title if closed: self.closedict[destinationname] = 1 self.currentlevel = level def setDestinations(self, destinationtree): self.mydestinations = destinationtree def format(self, document): D = {} D["Type"] = PDFName("Outlines") c = self.count D["Count"] = c if c!=0: D["First"] = self.first D["Last"] = self.last PD = PDFDictionary(D) return PD.format(document) def setNames(self, canvas, *nametree): desttree = self.translateNames(canvas, nametree) self.setDestinations(desttree) def setNameList(self, canvas, nametree): "Explicit list so I don't need to do in the caller" desttree = self.translateNames(canvas, nametree) self.setDestinations(desttree) def translateNames(self, canvas, object): "recursively translate tree of names into tree of destinations" destinationnamestotitles = self.destinationnamestotitles destinationstotitles = self.destinationstotitles closedict = self.closedict if isStr(object): if not isUnicode(object): object = object.decode('utf8') destination = canvas._bookmarkReference(object) title = object if object in destinationnamestotitles: title = destinationnamestotitles[object] else: destinationnamestotitles[title] = title destinationstotitles[destination] = title if object in closedict: closedict[destination] = 1 # mark destination closed return {object: canvas._bookmarkReference(object)} # 
name-->ref if isSeq(object): L = [] for o in object: L.append(self.translateNames(canvas, o)) if isinstance(object,tuple): return tuple(L) return L # bug contributed by Benjamin Dumke <reportlab@benjamin-dumke.de> raise TypeError("in outline, destination name must be string: got a %s"%type(object)) def prepare(self, document, canvas): """prepare all data structures required for save operation (create related objects)""" if self.mydestinations is None: if self.levelstack: self.addOutlineEntry(None) # close the tree destnames = self.levelstack[0] #from pprint import pprint; pprint(destnames); stop self.mydestinations = self.translateNames(canvas, destnames) else: self.first = self.last = None self.count = 0 self.ready = 1 return #self.first = document.objectReference("Outline.First") #self.last = document.objectReference("Outline.Last") # XXXX this needs to be generalized for closed entries! self.count = count(self.mydestinations, self.closedict) (self.first, self.last) = self.maketree(document, self.mydestinations, toplevel=1) self.ready = 1 def maketree(self, document, destinationtree, Parent=None, toplevel=0): if toplevel: levelname = "Outline" Parent = document.Reference(document.Outlines) else: self.count = self.count+1 levelname = "Outline.%s" % self.count if Parent is None: raise ValueError("non-top level outline elt parent must be specified") if not isSeq(destinationtree): raise ValueError("destinationtree must be list or tuple, got %s") nelts = len(destinationtree) lastindex = nelts-1 lastelt = firstref = lastref = None destinationnamestotitles = self.destinationnamestotitles closedict = self.closedict for index in range(nelts): eltobj = OutlineEntryObject() eltobj.Parent = Parent eltname = "%s.%s" % (levelname, index) eltref = document.Reference(eltobj, eltname) #document.add(eltname, eltobj) if lastelt is not None: lastelt.Next = eltref eltobj.Prev = lastref if firstref is None: firstref = eltref lastref = eltref lastelt = eltobj # advance eltobj lastref = 
def count(tree, closedict=None):
    """Utility for outline generation: recursively count the leaves in a
    tuple/list destination tree.

    A tuple node is ``(leafdict, subsections)``; if its destination appears
    in *closedict* the whole subtree counts as a single (collapsed) entry,
    otherwise the tuple falls through and both halves are counted
    recursively.  Any other non-sequence node is one leaf.
    """
    if isinstance(tree, tuple):
        # leaf with subsections XXXX should clean up this structural usage
        (leafdict, subsections) = tree
        [(Title, Dest)] = list(leafdict.items())
        if closedict and Dest in closedict:
            return 1  # closed tree element
    if isSeq(tree):
        # sum the leaf counts of all children (tuples fall through here too)
        return sum(count(e, closedict) for e in tree)
    return 1
If this is wrong, you get Postscript errors while printing, even though it does not print.""" producer = "ReportLab PDF Library - www.reportlab.com" creator = "ReportLab PDF Library - www.reportlab.com" title = "untitled" author = "anonymous" subject = "unspecified" keywords = "" _dateFormatter = None def __init__(self): self.invariant = rl_config.invariant self.trapped = 'False' #could be 'True' or 'Unknown' def digest(self, md5object): # add self information to signature for x in (self.title, self.author, self.subject, self.keywords): md5object.update(bytestr(x)) def format(self, document): D = {} D["Title"] = PDFString(self.title) D["Author"] = PDFString(self.author) D['ModDate'] = D["CreationDate"] = PDFDate(invariant=self.invariant,dateFormatter=self._dateFormatter) D["Producer"] = PDFString(self.producer) D["Creator"] = PDFString(self.creator) D["Subject"] = PDFString(self.subject) D["Keywords"] = PDFString(self.keywords) D["Trapped"] = PDFName(self.trapped) PD = PDFDictionary(D) return PD.format(document) def copy(self): "shallow copy - useful in pagecatchering" thing = self.__klass__() for k, v in self.__dict__.items(): setattr(thing, k, v) return thing # skipping thumbnails, etc class Annotation(PDFObject): """superclass for all annotations.""" defaults = [("Type", PDFName("Annot"),)] required = ("Type", "Rect", "Contents", "Subtype") permitted = required+( "Border", "C", "T", "M", "F", "H", "BS", "AA", "AS", "Popup", "P", "AP") def cvtdict(self, d, escape=1): """transform dict args from python form to pdf string rep as needed""" Rect = d["Rect"] if not isStr(Rect): d["Rect"] = PDFArray(Rect) d["Contents"] = PDFString(d["Contents"],escape) return d def AnnotationDict(self, **kw): if 'escape' in kw: escape = kw['escape'] del kw['escape'] else: escape = 1 d = {} for (name,val) in self.defaults: d[name] = val d.update(kw) for name in self.required: if name not in d: raise ValueError("keyword argument %s missing" % name) d = self.cvtdict(d,escape=escape) 
permitted = self.permitted for name in d.keys(): if name not in permitted: raise ValueError("bad annotation dictionary name %s" % name) return PDFDictionary(d) def Dict(self): raise ValueError("DictString undefined for virtual superclass Annotation, must overload") # but usually #return self.AnnotationDict(self, Rect=(a,b,c,d)) or whatever def format(self, document): D = self.Dict() return D.format(document) class TextAnnotation(Annotation): permitted = Annotation.permitted + ( "Open", "Name") def __init__(self, Rect, Contents, **kw): self.Rect = Rect self.Contents = Contents self.otherkw = kw def Dict(self): d = {} d.update(self.otherkw) d["Rect"] = self.Rect d["Contents"] = self.Contents d["Subtype"] = "/Text" return self.AnnotationDict(**d) class FreeTextAnnotation(Annotation): permitted = Annotation.permitted + ("DA",) def __init__(self, Rect, Contents, DA, **kw): self.Rect = Rect self.Contents = Contents self.DA = DA self.otherkw = kw def Dict(self): d = {} d.update(self.otherkw) d["Rect"] = self.Rect d["Contents"] = self.Contents d["DA"] = self.DA d["Subtype"] = "/FreeText" return self.AnnotationDict(**d) class LinkAnnotation(Annotation): permitted = Annotation.permitted + ( "Dest", "A", "PA") def __init__(self, Rect, Contents, Destination, Border="[0 0 1]", **kw): self.Border = Border self.Rect = Rect self.Contents = Contents self.Destination = Destination self.otherkw = kw def dummyDictString(self): # old, testing return """ << /Type /Annot /Subtype /Link /Rect [71 717 190 734] /Border [16 16 1] /Dest [23 0 R /Fit] >> """ def Dict(self): d = {} d.update(self.otherkw) d["Border"] = self.Border d["Rect"] = self.Rect d["Contents"] = self.Contents d["Subtype"] = "/Link" d["Dest"] = self.Destination return self.AnnotationDict(**d) class HighlightAnnotation(Annotation): """ HighlightAnnotation is an annotation that highlights the selected area. Rect is the mouseover area that will show the contents. 
QuadPoints is a list of points to highlight, you can have many groups of four QuadPoints to allow highlighting many lines. """ permitted = Annotation.permitted + ("QuadPoints", ) def __init__(self, Rect, Contents, QuadPoints, Color=[0.83, 0.89, 0.95], **kw): self.Rect = Rect self.Contents = Contents self.otherkw = kw self.QuadPoints = QuadPoints self.Color = Color def cvtdict(self, d, escape=1): """transform dict args from python form to pdf string rep as needed""" Rect = d["Rect"] Quad = d["QuadPoints"] Color = d["C"] if not isinstance(Rect, str): d["Rect"] = PDFArray(Rect).format(d, IND=b" ") if not isinstance(Quad, str): d["QuadPoints"] = PDFArray(Quad).format(d, IND=b" ") if not isinstance(Color, str): d["C"] = PDFArray(Color).format(d, IND=b" ") d["Contents"] = PDFString(d["Contents"], escape) return d def Dict(self): d = {} d.update(self.otherkw) d["Rect"] = self.Rect d["Contents"] = self.Contents d["Subtype"] = "/Highlight" d["QuadPoints"] = self.QuadPoints d["C"] = self.Color return self.AnnotationDict(**d) def rect_to_quad(Rect): """ Utility method to convert a Rect to a QuadPoint """ return [Rect[0], Rect[1], Rect[2], Rect[1], Rect[0], Rect[3], Rect[2], Rect[3]] # skipping names tree # skipping actions # skipping names trees # skipping to chapter 7 class PDFRectangle(PDFObject): def __init__(self, llx, lly, urx, ury): self.llx, self.lly, self.ulx, self.ury = llx, lly, urx, ury def format(self, document): A = PDFArray([self.llx, self.lly, self.ulx, self.ury]) return format(A, document) _NOWT=None def _getTimeStamp(): global _NOWT if not _NOWT: import time _NOWT = time.time() return _NOWT class PDFDate(PDFObject): # gmt offset now suppported properly def __init__(self, invariant=rl_config.invariant, dateFormatter=None): if invariant: now = (2000,1,1,0,0,0,0) self.dhh = 0 self.dmm = 0 else: import time now = tuple(time.localtime(_getTimeStamp())[:6]) from time import timezone self.dhh = int(timezone / (3600.0)) self.dmm = (timezone % 3600) % 60 self.date = 
class Destination(PDFObject):
    """Deferred (named) destination.

    A placeholder that only becomes formattable once BOTH a display format
    (set by one of the fit/xyz methods below) and a page have been bound to
    it.  This allows e.g. "Appendix A" to be referenced before Appendix A is
    generated, provided it is resolved before the document is written out:

        d = Destination(name)
        d.fit()          # or any other format-defining method
        d.setPage(p)     # at present setPageRef is called on page generation
    """
    representation = format = page = None
    def __init__(self, name):
        self.name = name
        self.fmt = self.page = None
    def format(self, document):
        fmt = self.fmt
        if fmt is None:
            raise ValueError("format not resolved, probably missing URL scheme or undefined destination target for '%s'" % self.name)
        target_page = self.page
        if target_page is None:
            raise ValueError("Page not bound, probably missing URL scheme or undefined destination target for '%s'" % self.name)
        # bind the page late: it may not have existed when self.fmt was set
        fmt.page = target_page
        return fmt.format(document)
    # each method below selects one PDF destination style
    # (/XYZ, /Fit, /FitB, /FitH, /FitV, /FitBH, /FitBV, /FitR)
    def xyz(self, left, top, zoom):
        # see pdfspec mar 11 99 pp184+
        self.fmt = PDFDestinationXYZ(None, left, top, zoom)
    def fit(self):
        self.fmt = PDFDestinationFit(None)
    def fitb(self):
        self.fmt = PDFDestinationFitB(None)
    def fith(self, top):
        self.fmt = PDFDestinationFitH(None, top)
    def fitv(self, left):
        self.fmt = PDFDestinationFitV(None, left)
    def fitbh(self, top):
        self.fmt = PDFDestinationFitBH(None, top)
    def fitbv(self, left):
        self.fmt = PDFDestinationFitBV(None, left)
    def fitr(self, left, bottom, right, top):
        self.fmt = PDFDestinationFitR(None, left, bottom, right, top)
    def setPage(self, page):
        self.page = page
        # self.fmt.page is deliberately NOT set here: fmt may not yet exist
class PDFDestinationXYZ(PDFObject):
    """Destination at explicit (left, top) coordinates with a zoom factor."""
    typename = "XYZ"
    def __init__(self, page, left, top, zoom):
        self.page = page
        self.top = top
        self.zoom = zoom
        self.left = left
    def format(self, document):
        page_ref = document.Reference(self.page)
        arr = PDFArray([page_ref, PDFName(self.typename), self.left, self.top, self.zoom])
        return format(arr, document)

class PDFDestinationFit(PDFObject):
    """Destination that fits the entire page in the window."""
    typename = "Fit"
    def __init__(self, page):
        self.page = page
    def format(self, document):
        page_ref = document.Reference(self.page)
        arr = PDFArray([page_ref, PDFName(self.typename)])
        return format(arr, document)

class PDFDestinationFitB(PDFDestinationFit):
    # fit the bounding box of the page contents
    typename = "FitB"

class PDFDestinationFitH(PDFObject):
    """Fit the page width, with the given top coordinate at the window top."""
    typename = "FitH"
    def __init__(self, page, top):
        self.page = page
        self.top = top
    def format(self, document):
        page_ref = document.Reference(self.page)
        arr = PDFArray([page_ref, PDFName(self.typename), self.top])
        return format(arr, document)

class PDFDestinationFitBH(PDFDestinationFitH):
    # horizontal fit of the content bounding box
    typename = "FitBH"

class PDFDestinationFitV(PDFObject):
    """Fit the page height, with the given left coordinate at the window edge."""
    typename = "FitV"
    def __init__(self, page, left):
        self.page = page
        self.left = left
    def format(self, document):
        page_ref = document.Reference(self.page)
        arr = PDFArray([page_ref, PDFName(self.typename), self.left])
        return format(arr, document)

class PDFDestinationFitBV(PDFDestinationFitV):
    # vertical fit of the content bounding box
    typename = "FitBV"

class PDFDestinationFitR(PDFObject):
    """Fit the given rectangle (left, bottom, right, top) in the window."""
    typename = "FitR"
    def __init__(self, page, left, bottom, right, top):
        self.page = page
        self.left = left
        self.bottom = bottom
        self.right = right
        self.top = top
    def format(self, document):
        page_ref = document.Reference(self.page)
        arr = PDFArray([page_ref, PDFName(self.typename), self.left, self.bottom, self.right, self.top])
        return format(arr, document)
[] self.Properties = {} self.Shading = {} # ?by default define the basicprocs self.basicProcs() stdprocs = [PDFName(s) for s in "PDF Text ImageB ImageC ImageI".split()] dict_attributes = ("ColorSpace", "XObject", "ExtGState", "Font", "Pattern", "Properties", "Shading") def allProcs(self): # define all standard procsets self.ProcSet = self.stdprocs def basicProcs(self): self.ProcSet = self.stdprocs[:2] # just PDF and Text def basicFonts(self): self.Font = PDFObjectReference(BasicFonts) def setColorSpace(self,colorsUsed): for c,s in colorsUsed.items(): self.ColorSpace[s] = PDFObjectReference(c) def setShading(self,shadingUsed): for c,s in shadingUsed.items(): self.Shading[s] = PDFObjectReference(c) def format(self, document): D = {} for dname in self.dict_attributes: v = getattr(self, dname) if isinstance(v,dict): if v: dv = PDFDictionary(v) D[dname] = dv else: D[dname] = v v = self.ProcSet dname = "ProcSet" if isSeq(v): if v: dv = PDFArray(v) D[dname] = dv else: D[dname] = v DD = PDFDictionary(D) return format(DD, document) ############################################################################## # # Font objects - the PDFDocument.addFont() method knows which of these # to construct when given a user-facing Font object # ############################################################################## class PDFType1Font(PDFObject): """no init: set attributes explicitly""" __RefOnly__ = 1 # note! /Name appears to be an undocumented attribute.... 
name_attributes = "Type Subtype BaseFont Name".split() Type = "Font" Subtype = "Type1" # these attributes are assumed to already be of the right type local_attributes = "FirstChar LastChar Widths Encoding ToUnicode FontDescriptor".split() def format(self, document): D = {} for name in self.name_attributes: if hasattr(self, name): value = getattr(self, name) D[name] = PDFName(value) for name in self.local_attributes: if hasattr(self, name): value = getattr(self, name) D[name] = value #print D PD = PDFDictionary(D) return PD.format(document) ## These attribute listings will be useful in future, even if we ## put them elsewhere class PDFTrueTypeFont(PDFType1Font): Subtype = "TrueType" #local_attributes = "FirstChar LastChar Widths Encoding ToUnicode FontDescriptor".split() #same ##class PDFMMType1Font(PDFType1Font): ## Subtype = "MMType1" ## ##class PDFType3Font(PDFType1Font): ## Subtype = "Type3" ## local_attributes = "FirstChar LastChar Widths CharProcs FontBBox FontMatrix Resources Encoding".split() ## ##class PDFType0Font(PDFType1Font): ## Subtype = "Type0" ## local_attributes = "DescendantFonts Encoding".split( ## ##class PDFCIDFontType0(PDFType1Font): ## Subtype = "CIDFontType0" ## local_attributes = "CIDSystemInfo FontDescriptor DW W DW2 W2 Registry Ordering Supplement".split() ## ##class PDFCIDFontType0(PDFType1Font): ## Subtype = "CIDFontType2" ## local_attributes = "BaseFont CIDToGIDMap CIDSystemInfo FontDescriptor DW W DW2 W2".split() ## ##class PDFEncoding(PDFType1Font): ## Type = "Encoding" ## name_attributes = "Type BaseEncoding".split() ## # these attributes are assumed to already be of the right type ## local_attributes = ["Differences"] ## # UGLY ALERT - this needs turning into something O-O, it was hacked # across from the pdfmetrics.Encoding class to avoid circularity # skipping CMaps class PDFFormXObject(PDFObject): # like page requires .info set by some higher level (doc) # XXXX any resource used in a form must be propagated up to the page that 
(recursively) uses # the form!! (not implemented yet). XObjects = Annots = BBox = Matrix = Contents = stream = Resources = None hasImages = 1 # probably should change compression = 0 def __init__(self, lowerx, lowery, upperx, uppery): #not done self.lowerx = lowerx; self.lowery=lowery; self.upperx=upperx; self.uppery=uppery def setStreamList(self, data): if isSeq(data): data = '\r\n'.join(data) self.stream = pdfdocEnc(data) def BBoxList(self): "get the declared bounding box for the form as a list" if self.BBox: return list(self.BBox.sequence) else: return [self.lowerx, self.lowery, self.upperx, self.uppery] def format(self, document): self.BBox = self.BBox or PDFArray([self.lowerx, self.lowery, self.upperx, self.uppery]) self.Matrix = self.Matrix or PDFArray([1, 0, 0, 1, 0, 0]) if not self.Annots: self.Annots = None else: #these must be transferred to the page when the form is used raise ValueError("annotations don't work in PDFFormXObjects yet") if not self.Contents: stream = self.stream if not stream: self.Contents = teststream() else: S = PDFStream() S.content = stream # need to add filter stuff (?) S.__Comment__ = "xobject form stream" self.Contents = S if not self.Resources: resources = PDFResourceDictionary() # fonts! resources.basicFonts() if self.hasImages: resources.allProcs() else: resources.basicProcs() if self.XObjects: #print "XObjects", self.XObjects.dict resources.XObject = self.XObjects self.Resources=resources if self.compression: self.Contents.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress] sdict = self.Contents.dictionary sdict["Type"] = PDFName("XObject") sdict["Subtype"] = PDFName("Form") sdict["FormType"] = 1 sdict["BBox"] = self.BBox sdict["Matrix"] = self.Matrix sdict["Resources"] = self.Resources return self.Contents.format(document) class PDFPostScriptXObject(PDFObject): "For embedding PD (e.g. 
tray commands) in PDF" def __init__(self, content=None): self.content = content def format(self, document): S = PDFStream() S.content = self.content S.__Comment__ = "xobject postscript stream" sdict = S.dictionary sdict["Type"] = PDFName("XObject") sdict["Subtype"] = PDFName("PS") return S.format(document) _mode2CS={'RGB':'DeviceRGB', 'L':'DeviceGray', 'CMYK':'DeviceCMYK'} class PDFImageXObject(PDFObject): # first attempts at a hard-coded one # in the file, Image XObjects are stream objects. We already # have a PDFStream object with 3 attributes: dictionary, content # and filters. So the job of this thing is to construct the # right PDFStream instance and ask it to format itself. def __init__(self, name, source=None, mask=None): self.name = name self.width = 24 self.height = 23 self.bitsPerComponent = 1 self.colorSpace = 'DeviceGray' self._filters = rl_config.useA85 and ('ASCII85Decode',) or () self.streamContent = """ 003B00 002700 002480 0E4940 114920 14B220 3CB650 75FE88 17FF8C 175F14 1C07E2 3803C4 703182 F8EDFC B2BBC2 BB6F84 31BFC2 18EA3C 0E3E00 07FC00 03F800 1E1800 1FF800> """ self.mask = mask if source is None: pass # use the canned one. 
elif hasattr(source,'jpeg_fh'): self.loadImageFromSRC(source) #it is already a PIL Image else: # it is a filename import os ext = os.path.splitext(source)[1].lower() src = open_for_read(source) try: if not(ext in ('.jpg', '.jpeg') and self.loadImageFromJPEG(src)): if rl_config.useA85: self.loadImageFromA85(src) else: self.loadImageFromRaw(src) finally: src.close() def loadImageFromA85(self,source): IMG=[] imagedata = pdfutils.makeA85Image(source,IMG=IMG,detectJpeg=True) if not imagedata: return self.loadImageFromSRC(IMG[0]) imagedata = [s.strip() for s in imagedata] words = imagedata[1].split() self.width, self.height = (int(words[1]),int(words[3])) self.colorSpace = {'/RGB':'DeviceRGB', '/G':'DeviceGray', '/CMYK':'DeviceCMYK'}[words[7]] self.bitsPerComponent = 8 self._filters = 'ASCII85Decode','FlateDecode' #'A85','Fl' if IMG: self._checkTransparency(IMG[0]) elif self.mask=='auto': self.mask = None self.streamContent = ''.join(imagedata[3:-1]) def loadImageFromJPEG(self,imageFile): try: try: info = pdfutils.readJPEGInfo(imageFile) finally: imageFile.seek(0) #reset file pointer except: return False self.width, self.height = info[0], info[1] self.bitsPerComponent = 8 if info[2] == 1: self.colorSpace = 'DeviceGray' elif info[2] == 3: self.colorSpace = 'DeviceRGB' else: #maybe should generate an error, is this right for CMYK? 
self.colorSpace = 'DeviceCMYK' self._dotrans = 1 self.streamContent = imageFile.read() if rl_config.useA85: self.streamContent = asciiBase85Encode(self.streamContent) self._filters = 'ASCII85Decode','DCTDecode' #'A85','DCT' else: self._filters = 'DCTDecode', #'DCT' self.mask = None return True def loadImageFromRaw(self,source): IMG=[] imagedata = pdfutils.makeRawImage(source,IMG=IMG,detectJpeg=True) if not imagedata: return self.loadImageFromSRC(IMG[0]) words = imagedata[1].split() self.width = int(words[1]) self.height = int(words[3]) self.colorSpace = {'/RGB':'DeviceRGB', '/G':'DeviceGray', '/CMYK':'DeviceCMYK'}[words[7]] self.bitsPerComponent = 8 self._filters = 'FlateDecode', #'Fl' if IMG: self._checkTransparency(IMG[0]) elif self.mask=='auto': self.mask = None self.streamContent = ''.join(imagedata[3:-1]) def _checkTransparency(self,im): if self.mask=='auto': if im._dataA: self.mask = None self._smask = PDFImageXObject(_digester(im._dataA.getRGBData()),im._dataA,mask=None) self._smask._decode = [0,1] else: tc = im.getTransparent() if tc: self.mask = (tc[0], tc[0], tc[1], tc[1], tc[2], tc[2]) else: self.mask = None elif hasattr(self.mask,'rgb'): _ = self.mask.rgb() self.mask = _[0],_[0],_[1],_[1],_[2],_[2] def loadImageFromSRC(self, im): "Extracts the stream, width and height" fp = im.jpeg_fh() if fp: self.loadImageFromJPEG(fp) else: zlib = import_zlib() if not zlib: return self.width, self.height = im.getSize() raw = im.getRGBData() #assert len(raw) == self.width*self.height, "Wrong amount of data for image expected %sx%s=%s got %s" % (self.width,self.height,self.width*self.height,len(raw)) self.streamContent = zlib.compress(raw) if rl_config.useA85: self.streamContent = asciiBase85Encode(self.streamContent) self._filters = 'ASCII85Decode','FlateDecode' #'A85','Fl' else: self._filters = 'FlateDecode', #'Fl' self.colorSpace= _mode2CS[im.mode] self.bitsPerComponent = 8 self._checkTransparency(im) def format(self, document): S = PDFStream(content = 
self.streamContent) dict = S.dictionary dict["Type"] = PDFName("XObject") dict["Subtype"] = PDFName("Image") dict["Width"] = self.width dict["Height"] = self.height dict["BitsPerComponent"] = self.bitsPerComponent dict["ColorSpace"] = PDFName(self.colorSpace) if self.colorSpace=='DeviceCMYK' and getattr(self,'_dotrans',0): dict["Decode"] = PDFArray([1,0,1,0,1,0,1,0]) elif getattr(self,'_decode',None): dict["Decode"] = PDFArray(self._decode) dict["Filter"] = PDFArray(map(PDFName,self._filters)) dict["Length"] = len(self.streamContent) if self.mask: dict["Mask"] = PDFArray(self.mask) if getattr(self,'smask',None): dict["SMask"] = self.smask return S.format(document) class PDFSeparationCMYKColor: def __init__(self, cmyk): from reportlab.lib.colors import CMYKColor if not isinstance(cmyk,CMYKColor): raise ValueError('%s needs a CMYKColor argument' % self.__class__.__name__) elif not cmyk.spotName: raise ValueError('%s needs a CMYKColor argument with a spotName' % self.__class__.__name__) self.cmyk = cmyk def _makeFuncPS(self): '''create the postscript code for the tint transfer function effectively this is tint*c, tint*y, ... 
tint*k''' R = [].append for i,v in enumerate(self.cmyk.cmyk()): v=float(v) if i==3: if v==0.0: R('pop') R('0.0') else: R(str(v)) R('mul') else: if v==0: R('0.0') else: R('dup') R(str(v)) R('mul') R('exch') return '{%s}' % (' '.join(R.__self__)) def value(self): return PDFArrayCompact(( PDFName('Separation'), PDFName(self.cmyk.spotName), PDFName('DeviceCMYK'), PDFStream( dictionary=PDFDictionary(dict( FunctionType=4, Domain=PDFArrayCompact((0,1)), Range=PDFArrayCompact((0,1,0,1,0,1,0,1)) )), content=self._makeFuncPS(), filters=None,#[PDFBase85Encode, PDFZCompress], ) )) class PDFFunction(PDFObject): """superclass for all function types.""" defaults = [] required = ("FunctionType", "Domain") permitted = required+("Range",) def FunctionDict(self, **kw): d = {} for (name,val) in self.defaults: d[name] = val d.update(kw) for name in self.required: if name not in d: raise ValueError("keyword argument %s missing" % name) permitted = self.permitted for name in d.keys(): if name not in permitted: raise ValueError("bad annotation dictionary name %s" % name) return PDFDictionary(d) def Dict(self, document): raise ValueError("Dict undefined for virtual superclass PDFShading, must overload") # but usually #return self.FunctionDict(self, ...) 
def format(self, document): D = self.Dict(document) return D.format(document) class PDFExponentialFunction(PDFFunction): defaults = PDFFunction.defaults + [("Domain", PDFArrayCompact((0.0, 1.0)))] required = PDFFunction.required + ("N",) permitted = PDFFunction.permitted + ("C0", "C1", "N") def __init__(self, C0, C1, N, **kw): self.C0 = C0 self.C1 = C1 self.N = N self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["FunctionType"] = 2 d["C0"] = PDFArrayCompact(self.C0) d["C1"] = PDFArrayCompact(self.C1) d["N"] = self.N return self.FunctionDict(**d) class PDFStitchingFunction(PDFFunction): required = PDFFunction.required + ("Functions", "Bounds", "Encode") permitted = PDFFunction.permitted + ("Functions", "Bounds", "Encode") def __init__(self, Functions, Bounds, Encode, **kw): self.Functions = Functions self.Bounds = Bounds self.Encode = Encode self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["FunctionType"] = 3 d["Functions"] = PDFArray([document.Reference(x) for x in self.Functions]) d["Bounds"] = PDFArray(self.Bounds) d["Encode"] = PDFArray(self.Encode) return self.FunctionDict(**d) class PDFShading(PDFObject): """superclass for all shading types.""" required = ("ShadingType", "ColorSpace") permitted = required+("Background", "BBox", "AntiAlias") def ShadingDict(self, **kw): d = {} d.update(kw) for name in self.required: if name not in d: raise ValueError("keyword argument %s missing" % name) permitted = self.permitted for name in d.keys(): if name not in permitted: raise ValueError("bad annotation dictionary name %s" % name) return PDFDictionary(d) def Dict(self, document): raise ValueError("Dict undefined for virtual superclass PDFShading, must overload") # but usually #return self.ShadingDict(self, ...) 
def format(self, document): D = self.Dict(document) return D.format(document) class PDFFunctionShading(PDFShading): required = PDFShading.required + ("Function",) permitted = PDFShading.permitted + ("Domain", "Matrix", "Function") def __init__(self, Function, ColorSpace, **kw): self.Function = Function self.ColorSpace = ColorSpace self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["ShadingType"] = 1 d["ColorSpace"] = PDFName(self.ColorSpace) d["Function"] = document.Reference(self.Function) return self.ShadingDict(**d) class PDFAxialShading(PDFShading): required = PDFShading.required + ("Coords", "Function") permitted = PDFShading.permitted + ( "Coords", "Domain", "Function", "Extend") def __init__(self, x0, y0, x1, y1, Function, ColorSpace, **kw): self.Coords = (x0, y0, x1, y1) self.Function = Function self.ColorSpace = ColorSpace self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["ShadingType"] = 2 d["ColorSpace"] = PDFName(self.ColorSpace) d["Coords"] = PDFArrayCompact(self.Coords) d["Function"] = document.Reference(self.Function) return self.ShadingDict(**d) class PDFRadialShading(PDFShading): required = PDFShading.required + ("Coords", "Function") permitted = PDFShading.permitted + ( "Coords", "Domain", "Function", "Extend") def __init__(self, x0, y0, r0, x1, y1, r1, Function, ColorSpace, **kw): self.Coords = (x0, y0, r0, x1, y1, r1) self.Function = Function self.ColorSpace = ColorSpace self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["ShadingType"] = 3 d["ColorSpace"] = PDFName(self.ColorSpace) d["Coords"] = PDFArrayCompact(self.Coords) d["Function"] = document.Reference(self.Function) return self.ShadingDict(**d) if __name__=="__main__": print("There is no script interpretation for pdfdoc.")
37.438589
178
0.594501
__version__=''' $Id$ ''' __doc__=""" The module pdfdoc.py handles the 'outer structure' of PDF documents, ensuring that all objects are properly cross-referenced and indexed to the nearest byte. The 'inner structure' - the page descriptions - are presumed to be generated before each page is saved. pdfgen.py calls this and provides a 'canvas' object to handle page marking operators. piddlePDF calls pdfgen and offers a high-level interface. The classes within this generally mirror structures in the PDF file and are not part of any public interface. Instead, canvas and font classes are made available elsewhere for users to manipulate. """ import types, binascii, codecs from collections import OrderedDict from reportlab.pdfbase import pdfutils from reportlab import rl_config from reportlab.lib.utils import import_zlib, open_for_read, makeFileName, isSeq, isBytes, isUnicode, _digester, isStr, bytestr, isPy3, annotateException from reportlab.lib.rl_accel import escapePDF, fp_str, asciiBase85Encode, asciiBase85Decode from reportlab.pdfbase import pdfmetrics from hashlib import md5 from sys import platform from sys import version_info from sys import stderr if platform[:4] == 'java' and version_info[:2] == (2, 1): def list(sequence): def f(x): return x return list(map(f, sequence)) class PDFError(Exception): pass __InternalName__ = "__InternalName__" __RefOnly__ = "__RefOnly__" __Comment__ = "__Comment__" BasicFonts = "BasicFonts" Pages = "Pages" PDF_VERSION_DEFAULT = (1, 3) PDF_SUPPORT_VERSION = dict( transparency = (1, 4), ) if isPy3: def pdfdocEnc(x): return x.encode('extpdfdoc') if isinstance(x,str) else x else: def pdfdocEnc(x): return x.encode('extpdfdoc') if isinstance(x,unicode) else x def format(element, document, toplevel=0): if isinstance(element,PDFObject): if not toplevel and hasattr(element, __RefOnly__): return document.Reference(element).format(document) else: f = element.format(document) if not rl_config.invariant and rl_config.pdfComments and 
hasattr(element, __Comment__): f = pdfdocEnc("%% %s\r\n" % element.__Comment__)+f return f elif type(element) in (float, int): #use a controlled number formatting routine #instead of str, so Jython/Python etc do not differ return pdfdocEnc(fp_str(element)) elif isBytes(element): return element elif isUnicode(element): return pdfdocEnc(element) else: return pdfdocEnc(str(element)) def xObjectName(externalname): return "FormXob.%s" % externalname # backwards compatibility formName = xObjectName # no encryption class NoEncryption: def encode(self, t): return t def prepare(self, document): # get ready to do encryption pass def register(self, objnum, version): # enter a new direct object pass def info(self): # the representation of self in file if any (should be None or PDFDict) return None class PDFObject(object): pass class DummyDoc(PDFObject): encrypt = NoEncryption() ### the global document structure manager class PDFDocument(PDFObject): # set this to define filters defaultStreamFilters = None encrypt = NoEncryption() # default no encryption def __init__(self, dummyoutline=0, compression=rl_config.pageCompression, invariant=rl_config.invariant, filename=None, pdfVersion=PDF_VERSION_DEFAULT, ): self._ID = None self.objectcounter = 0 self.shadingCounter = 0 self.inObject = None self.pageCounter = 1 # allow None value to be passed in to mean 'give system defaults' if invariant is None: self.invariant = rl_config.invariant else: self.invariant = invariant self.setCompression(compression) self._pdfVersion = pdfVersion # signature for creating PDF ID sig = self.signature = md5() sig.update(b"a reportlab document") if not self.invariant: cat = _getTimeStamp() else: cat = 946684800.0 cat = ascii(cat) sig.update(bytestr(cat)) # initialize with timestamp digest # mapping of internal identifier ("Page001") to PDF objectnumber and generation number (34, 0) self.idToObjectNumberAndVersion = {} # mapping of internal identifier ("Page001") to PDF object (PDFPage instance) 
self.idToObject = {} # internal id to file location self.idToOffset = {} # number to id self.numberToId = {} cat = self.Catalog = self._catalog = PDFCatalog() pages = self.Pages = PDFPages() cat.Pages = pages if dummyoutline: outlines = PDFOutlines0() else: outlines = PDFOutlines() self.Outlines = self.outline = outlines cat.Outlines = outlines self.info = PDFInfo() self.info.invariant = self.invariant #self.Reference(self.Catalog) #self.Reference(self.Info) self.fontMapping = {} #make an empty font dictionary DD = PDFDictionary({}) DD.__Comment__ = "The standard fonts dictionary" self.Reference(DD, BasicFonts) self.delayedFonts = [] def setCompression(self, onoff): # XXX: maybe this should also set self.defaultStreamFilters? self.compression = onoff def ensureMinPdfVersion(self, *keys): for k in keys: self._pdfVersion = max(self._pdfVersion, PDF_SUPPORT_VERSION[k]) def updateSignature(self, thing): if self._ID: return # but not if its used already! self.signature.update(bytestr(thing)) def ID(self): if self._ID: return self._ID digest = self.signature.digest() doc = DummyDoc() IDs = PDFString(digest,enc='raw').format(doc) self._ID = (b'\r\n % ReportLab generated PDF document -- digest (http://www.reportlab.com)\r\n [' +IDs+b' '+IDs+b']\r\n') return self._ID def SaveToFile(self, filename, canvas): if hasattr(getattr(filename, "write",None),'__call__'): myfile = 0 f = filename filename = makeFileName(getattr(filename,'name','')) else : myfile = 1 filename = makeFileName(filename) f = open(filename, "wb") data = self.GetPDFData(canvas) if isUnicode(data): data = data.encode('latin1') f.write(data) if myfile: f.close() import os if os.name=='mac': from reportlab.lib.utils import markfilename markfilename(filename) # do platform specific file junk if getattr(canvas,'_verbosity',None): print('saved %s' % (filename,)) def GetPDFData(self, canvas): # realize delayed fonts for fnt in self.delayedFonts: fnt.addObjects(self) # add info stuff to signature self.info.invariant 
= self.invariant self.info.digest(self.signature) ### later: maybe add more info to sig? # prepare outline self.Reference(self.Catalog) self.Reference(self.info) outline = self.outline outline.prepare(self, canvas) return self.format() def inPage(self): if self.inObject is not None: if self.inObject=="page": return raise ValueError("can't go in page already in object %s" % self.inObject) self.inObject = "page" def inForm(self): #if self.inObject not in ["form", None]: # raise ValueError("can't go in form already in object %s" % self.inObject) self.inObject = "form" def getInternalFontName(self, psfontname): fm = self.fontMapping if psfontname in fm: return fm[psfontname] else: try: # does pdfmetrics know about it? if so, add fontObj = pdfmetrics.getFont(psfontname) if fontObj._dynamicFont: raise PDFError("getInternalFontName(%s) called for a dynamic font" % repr(psfontname)) fontObj.addObjects(self) return fm[psfontname] except KeyError: raise PDFError("Font %s not known!" % repr(psfontname)) def thisPageName(self): return "Page"+repr(self.pageCounter) def thisPageRef(self): return PDFObjectReference(self.thisPageName()) def addPage(self, page): name = self.thisPageName() self.Reference(page, name) self.Pages.addPage(page) self.pageCounter += 1 self.inObject = None def addForm(self, name, form): # XXX should check that name is a legal PDF name if self.inObject != "form": self.inForm() self.Reference(form, xObjectName(name)) self.inObject = None def annotationName(self, externalname): return "Annot.%s"%externalname def addAnnotation(self, name, annotation): self.Reference(annotation, self.annotationName(name)) def refAnnotation(self, name): internalname = self.annotationName(name) return PDFObjectReference(internalname) def addShading(self, shading): name = "Sh%d" % self.shadingCounter self.Reference(shading, name) self.shadingCounter += 1 return name def addColor(self,cmyk): sname = cmyk.spotName if not sname: if cmyk.cyan==0 and cmyk.magenta==0 and cmyk.yellow==0: 
sname = 'BLACK' elif cmyk.black==0 and cmyk.magenta==0 and cmyk.yellow==0: sname = 'CYAN' elif cmyk.cyan==0 and cmyk.black==0 and cmyk.yellow==0: sname = 'MAGENTA' elif cmyk.cyan==0 and cmyk.magenta==0 and cmyk.black==0: sname = 'YELLOW' if not sname: raise ValueError("CMYK colour %r used without a spotName" % cmyk) else: cmyk = cmyk.clone(spotName = sname) name = PDFName(sname)[1:] if name not in self.idToObject: sep = PDFSeparationCMYKColor(cmyk).value() #PDFArray([/Separation /name /DeviceCMYK tint_tf]) self.Reference(sep,name) return name,sname def setTitle(self, title): if title is None: self.info.title = '(anonymous)' else: self.info.title = title def setAuthor(self, author): #allow resetting to clear it if author is None: self.info.author = '(anonymous)' else: self.info.author = author def setSubject(self, subject): #allow resetting to clear it if subject is None: self.info.subject = '(unspecified)' else: self.info.subject = subject def setCreator(self, creator): #allow resetting to clear it if creator is None: self.info.creator = '(unspecified)' else: self.info.creator = creator def setKeywords(self, keywords): #allow resetting to clear it but ensure it's a string if keywords is None: self.info.keywords = '' else: self.info.keywords = keywords def setDateFormatter(self, dateFormatter): self.info._dateFormatter = dateFormatter def getAvailableFonts(self): fontnames = list(self.fontMapping.keys()) from reportlab.pdfbase import _fontdata for name in _fontdata.standardFonts: if name not in fontnames: fontnames.append(name) fontnames.sort() return fontnames def format(self): self.encrypt.prepare(self) cat = self.Catalog info = self.info self.Reference(self.Catalog) self.Reference(self.info) encryptref = None encryptinfo = self.encrypt.info() if encryptinfo: encryptref = self.Reference(encryptinfo) counter = 0 ids = [] numbertoid = self.numberToId idToNV = self.idToObjectNumberAndVersion idToOb = self.idToObject idToOf = self.idToOffset counter += 1 if counter in 
numbertoid: id = numbertoid[counter] obj = idToOb[id] IO = PDFIndirectObject(id, obj) IOf = IO.format(self) if not rl_config.invariant and rl_config.pdfComments: try: classname = obj.__class__.__name__ except: classname = ascii(obj) File.add("%% %s: class %s \r\n" % (ascii(id), classname[:50])) offset = File.add(IOf) idToOf[id] = offset ids.append(id) else: done = 1 del self.__accum__ lno = len(numbertoid) if counter-1!=lno: raise ValueError("counter %s doesn't match number to id dictionary %s" %(counter, lno)) # now add the xref xref = PDFCrossReferenceTable() xref.addsection(0, ids) xreff = xref.format(self) xrefoffset = File.add(xreff) # now add the trailer trailer = PDFTrailer( startxref = xrefoffset, Size = lno+1, Root = self.Reference(cat), Info = self.Reference(info), Encrypt = encryptref, ID = self.ID(), ) trailerf = trailer.format(self) File.add(trailerf) for ds in getattr(self,'_digiSigs',[]): ds.sign(File) # return string format for pdf file return File.format(self) def hasForm(self, name): internalname = xObjectName(name) return internalname in self.idToObject def getFormBBox(self, name, boxType="MediaBox"): internalname = xObjectName(name) if internalname in self.idToObject: theform = self.idToObject[internalname] if hasattr(theform,'_extra_pageCatcher_info'): return theform._extra_pageCatcher_info[boxType] if isinstance(theform, PDFFormXObject): # internally defined form return theform.BBoxList() elif isinstance(theform, PDFStream): # externally defined form return list(theform.dictionary.dict[boxType].sequence) else: raise ValueError("I don't understand the form instance %s" % repr(name)) def getXObjectName(self, name): return xObjectName(name) def xobjDict(self, formnames): D = {} for name in formnames: internalname = xObjectName(name) reference = PDFObjectReference(internalname) D[internalname] = reference return PDFDictionary(D) def Reference(self, obj, name=None): Object if name is None and (not iob or obj.__class__ is PDFObjectReference): return 
obj if hasattr(obj, __InternalName__): intname = obj.__InternalName__ if name is not None and name!=intname: raise ValueError("attempt to reregister object %s with new name %s" % ( repr(intname), repr(name))) if intname not in idToObject: raise ValueError("object of type %s named as %s, but not registered" % (type(obj),ascii(intname))) return PDFObjectReference(intname) objectcounter = self.objectcounter = self.objectcounter+1 if name is None: name = "R"+repr(objectcounter) if name in idToObject: other = idToObject[name] if other!=obj: raise ValueError("redefining named object: "+repr(name)) return PDFObjectReference(name) if iob: obj.__InternalName__ = name self.idToObjectNumberAndVersion[name] = (objectcounter, 0) self.numberToId[objectcounter] = name idToObject[name] = obj return PDFObjectReference(name) Fnull = "null" class PDFText(PDFObject): def __init__(self, t): self.t = t def format(self, document): t = self.t if isUnicode(t): t = t.encode('utf-8') result = binascii.hexlify(document.encrypt.encode(t)) return b"<" + result + b">" def __str__(self): dummydoc = DummyDoc() return self.format(dummydoc) def PDFnumber(n): return n import re _re_cleanparens=re.compile('[^()]') del re def _isbalanced(s): s = _re_cleanparens.sub('',s) n = 0 for c in s: if c=='(': n+=1 else: n -= 1 if n<0: return 0 return not n and 1 or 0 def _checkPdfdoc(utext): try: utext.encode('pdfdoc') return 1 except UnicodeEncodeError as e: return 0 class PDFString(PDFObject): def __init__(self, s, escape=1, enc='auto'): if isinstance(s,PDFString): self.s = s.s self.escape = s.escape self.enc = s.enc else: self.s = s self.escape = escape self.enc = enc def format(self, document): s = self.s enc = getattr(self,'enc','auto') if (isBytes(s)): if enc is 'auto': try: u = s.decode(s.startswith(codecs.BOM_UTF16_BE) and 'utf16' or 'utf8') if _checkPdfdoc(u): s = u.encode('pdfdoc') else: s = codecs.BOM_UTF16_BE+u.encode('utf_16_be') except: try: s.decode('pdfdoc') except: stderr.write('Error in %s' % 
(repr(s),)) raise elif isUnicode(s): if enc is 'auto': if _checkPdfdoc(s): s = s.encode('pdfdoc') else: s = codecs.BOM_UTF16_BE+s.encode('utf_16_be') else: s = codecs.BOM_UTF16_BE+s.encode('utf_16_be') else: raise ValueError('PDFString argument must be str/unicode not %s' % type(s)) escape = getattr(self,'escape',1) if not isinstance(document.encrypt,NoEncryption): s = document.encrypt.encode(s) escape = 1 if escape: try: es = "(%s)" % escapePDF(s) except: raise ValueError("cannot escape %s %s" % (s, repr(s))) if escape&2: es = es.replace('\\012','\n') if escape&4 and _isbalanced(es): es = es.replace('\\(','(').replace('\\)',')') return pdfdocEnc(es) else: return b'(' + s + b')' def __str__(self): return "(%s)" % escapePDF(self.s) def PDFName(data,lo=chr(0x21),hi=chr(0x7e)): L = list(data) for i,c in enumerate(L): if c<lo or c>hi or c in "%()<>{}[]#": L[i] = "#"+hex(ord(c))[2:] return "/"+(''.join(L)) class PDFDictionary(PDFObject): multiline = True def __init__(self, dict=None): if dict is None: self.dict = {} else: self.dict = dict.copy() def __setitem__(self, name, value): self.dict[name] = value def __getitem__(self, a): return self.dict[a] def __contains__(self,a): return a in self.dict def Reference(self, name, document): self.dict[name] = document.Reference(self.dict[name]) def format(self, document,IND=b'\r\n '): dict = self.dict try: keys = list(dict.keys()) except: print(ascii(dict)) raise if not isinstance(dict,OrderedDict): keys.sort() L = [(format(PDFName(k),document)+b" "+format(dict[k],document)) for k in keys] if self.multiline and rl_config.pdfMultiLine: L = IND.join(L) else: t=L.insert for i in reversed(range(6, len(L), 6)): t(i,b'\r\n ') L = b" ".join(L) return b'<< '+L+b' >>' def copy(self): return PDFDictionary(self.dict) def normalize(self): D = self.dict K = [k for k in D.keys() if k.startswith('/')] for k in K: D[k[1:]] = D.pop(k) class checkPDFNames: def __init__(self,*names): self.names = list(map(PDFName,names)) def __call__(self,value): 
if not value.startswith('/'): value=PDFName(value) if value in self.names: return value def checkPDFBoolean(value): if value in ('true','false'): return value class CheckedPDFDictionary(PDFDictionary): validate = {} def __init__(self,dict=None,validate=None): PDFDictionary.__init__(self,dict) if validate: self.validate = validate def __setitem__(self,name,value): if name not in self.validate: raise ValueError('invalid key, %r' % name) cvalue = self.validate[name](value) if cvalue is None: raise ValueError('Bad value %r for key %r' % (value,name)) PDFDictionary.__setitem__(self,name,cvalue) class ViewerPreferencesPDFDictionary(CheckedPDFDictionary): validate=dict( HideToolbar=checkPDFBoolean, HideMenubar=checkPDFBoolean, HideWindowUI=checkPDFBoolean, FitWindow=checkPDFBoolean, CenterWindow=checkPDFBoolean, DisplayDocTitle=checkPDFBoolean, NonFullScreenPageMode=checkPDFNames(*'UseNone UseOutlines UseThumbs UseOC'.split()), Direction=checkPDFNames(*'L2R R2L'.split()), ViewArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()), ViewClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()), PrintArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()), PrintClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()), PrintScaling=checkPDFNames(*'None AppDefault'.split()), ) class PDFStreamFilterZCompress: pdfname = "FlateDecode" def encode(self, text): from reportlab.lib.utils import import_zlib zlib = import_zlib() if not zlib: raise ImportError("cannot z-compress zlib unavailable") if isUnicode(text): text = text.encode('utf8') return zlib.compress(text) def decode(self, encoded): from reportlab.lib.utils import import_zlib zlib = import_zlib() if not zlib: raise ImportError("cannot z-decompress zlib unavailable") return zlib.decompress(encoded) PDFZCompress = PDFStreamFilterZCompress() class PDFStreamFilterBase85Encode: pdfname = "ASCII85Decode" def encode(self, text): from reportlab.pdfbase.pdfutils import 
_wrap text = asciiBase85Encode(text) if rl_config.wrapA85: text = _wrap(text) return text def decode(self, text): return asciiBase85Decode(text) PDFBase85Encode = PDFStreamFilterBase85Encode() class PDFStream(PDFObject): ters=None): if dictionary is None: dictionary = PDFDictionary() self.dictionary = dictionary self.content = content self.filters = filters def format(self, document): dictionary = self.dictionary dictionary = PDFDictionary(dictionary.dict.copy()) content = self.content filters = self.filters if self.content is None: raise ValueError("stream content not set") if filters is None: filters = document.defaultStreamFilters if filters is not None and "Filter" not in dictionary.dict: # apply filters in reverse order listed rf = list(filters) rf.reverse() fnames = [] for f in rf: #print "*****************content:"; print repr(content[:200]) #print "*****************filter", f.pdfname content = f.encode(content) fnames.insert(0, PDFName(f.pdfname)) #print "*****************finally:"; print content[:200] #print "****** FILTERS", fnames #stop dictionary["Filter"] = PDFArray(fnames) # "stream encoding is done after all filters have been applied" content = document.encrypt.encode(content) fc = format(content, document) dictionary["Length"] = len(content) fd = format(dictionary, document) return fd+b'\r\nstream\r\n'+fc+b'endstream\r\n' def teststream(content=None): #content = "" # test if content is None: content = teststreamcontent content = content.strip() content = content.replace("\n", '\n\r') + '\n\r' S = PDFStream(content = content, filters=rl_config.useA85 and [PDFBase85Encode,PDFZCompress] or [PDFZCompress]) # nothing else needed... 
S.__Comment__ = "test stream" return S teststreamcontent = """ 1 0 0 1 0 0 cm BT /F9 12 Tf 14.4 TL ET 1.00 0.00 1.00 rg n 72.00 72.00 432.00 648.00 re B* """ class PDFArray(PDFObject): multiline = True def __init__(self, sequence): self.sequence = list(sequence) def References(self, document): self.sequence = list(map(document.Reference, self.sequence)) def format(self, document, IND=b'\r\n '): L = [format(e, document) for e in self.sequence] if self.multiline and rl_config.pdfMultiLine: L = IND.join(L) else: n=len(L) if n>10: # break up every 10 elements anyway t=L.insert for i in reversed(range(10, n, 10)): t(i,b'\r\n ') L = b' '.join(L) else: L = b' '.join(L) return b'[ ' + L + b' ]' class PDFArrayCompact(PDFArray): multiline=False class PDFIndirectObject(PDFObject): __RefOnly__ = 1 def __init__(self, name, content): self.name = name self.content = content def format(self, document): name = self.name n, v = document.idToObjectNumberAndVersion[name] # set encryption parameters document.encrypt.register(n, v) fcontent = format(self.content, document, toplevel=1) # yes this is at top level return (pdfdocEnc("%s %s obj\r\n"%(n,v)) +fcontent+ (b'' if fcontent.endswith(b'\r\n') else b'\r\n') +b'endobj\r\n') class PDFObjectReference(PDFObject): def __init__(self, name): self.name = name def format(self, document): try: return pdfdocEnc("%s %s R" % document.idToObjectNumberAndVersion[self.name]) except: raise KeyError("forward reference to %s not resolved upon final formatting" % repr(self.name)) class PDFFile(PDFObject): ### just accumulates strings: keeps track of current offset def __init__(self,pdfVersion=PDF_VERSION_DEFAULT): self.strings = [] self.write = self.strings.append self.offset = 0 ### chapter 5 # Following Ken Lunde's advice and the PDF spec, this includes # has run our PDF files through a dodgy Unicode conversion. 
self.add((pdfdocEnc("%%PDF-%s.%s" % pdfVersion) + b'\r\n%\223\214\213\236 ReportLab Generated PDF document http://www.reportlab.com\r\n' )) def closeOrReset(self): pass def add(self, s): s = pdfdocEnc(s) result = self.offset self.offset = result+len(s) self.write(s) return result def format(self, document): return b''.join(self.strings) XREFFMT = '%0.10d %0.5d n' class PDFCrossReferenceSubsection(PDFObject): def __init__(self, firstentrynumber, idsequence): self.firstentrynumber = firstentrynumber self.idsequence = idsequence def format(self, document): firstentrynumber = self.firstentrynumber idsequence = self.idsequence entries = list(idsequence) nentries = len(idsequence) # special case: object number 0 is always free taken = {} if firstentrynumber==0: taken[0] = "standard free entry" nentries = nentries+1 entries.insert(0, "0000000000 65535 f") idToNV = document.idToObjectNumberAndVersion idToOffset = document.idToOffset lastentrynumber = firstentrynumber+nentries-1 for id in idsequence: (num, version) = idToNV[id] if num in taken: raise ValueError("object number collision %s %s %s" % (num, repr(id), repr(taken[id]))) if num>lastentrynumber or num<firstentrynumber: raise ValueError("object number %s not in range %s..%s" % (num, firstentrynumber, lastentrynumber)) # compute position in list rnum = num-firstentrynumber taken[num] = id offset = idToOffset[id] entries[num] = XREFFMT % (offset, version) # now add the initial line firstline = "%s %s" % (firstentrynumber, nentries) entries.insert(0, firstline) # make sure it ends with \r\n entries.append("") return pdfdocEnc('\r\n'.join(entries)) class PDFCrossReferenceTable(PDFObject): def __init__(self): self.sections = [] def addsection(self, firstentry, ids): section = PDFCrossReferenceSubsection(firstentry, ids) self.sections.append(section) def format(self, document): sections = self.sections if not sections: raise ValueError("no crossref sections") L = [b"xref\r\n"] for s in self.sections: fs = format(s, 
document) L.append(fs) return pdfdocEnc(b''.join(L)) class PDFTrailer(PDFObject): def __init__(self, startxref, Size=None, Prev=None, Root=None, Info=None, ID=None, Encrypt=None): self.startxref = startxref if Size is None or Root is None: raise ValueError("Size and Root keys required") dict = self.dict = PDFDictionary() for (n,v) in [("Size", Size), ("Prev", Prev), ("Root", Root), ("Info", Info), ("ID", ID), ("Encrypt", Encrypt)]: if v is not None: dict[n] = v def format(self, document): fdict = format(self.dict, document) return b''.join([ b'trailer\r\n', fdict, b'\r\nstartxref\r\n', pdfdocEnc(str(self.startxref)), b'\r\n%%EOF\r\n', ] ) #### XXXX skipping incremental update, #### encryption #### chapter 6, doc structure class PDFCatalog(PDFObject): __Comment__ = "Document Root" __RefOnly__ = 1 # to override, set as attributes __Defaults__ = {"Type": PDFName("Catalog"), "PageMode": PDFName("UseNone"), "Lang": None, } __NoDefault__ = """ Dests Outlines Pages Threads AcroForm Names OpenAction PageMode URI ViewerPreferences PageLabels PageLayout JavaScript StructTreeRoot SpiderInfo""".split() __Refs__ = __NoDefault__ # make these all into references, if present def format(self, document): self.check_format(document) defaults = self.__Defaults__ Refs = self.__Refs__ D = {} for k,v in defaults.items(): v = getattr(self,k,v) if v is not None: D[k] = v for k in self.__NoDefault__: v = getattr(self,k,None) if v is not None: D[k] = v # force objects to be references where required for k in Refs: if k in D: #print"k is", k, "value", D[k] D[k] = document.Reference(D[k]) dict = PDFDictionary(D) return format(dict, document) def showOutline(self): self.setPageMode("UseOutlines") def showFullScreen(self): self.setPageMode("FullScreen") def setPageLayout(self,layout): if layout: self.PageLayout = PDFName(layout) def setPageMode(self,mode): if mode: self.PageMode = PDFName(mode) def check_format(self, document): pass class PDFPages(PDFCatalog): __Comment__ = "page tree" 
__RefOnly__ = 1 # note: could implement page attribute inheritance... __Defaults__ = {"Type": PDFName("Pages"), } __NoDefault__ = "Kids Count Parent".split() __Refs__ = ["Parent"] def __init__(self): self.pages = [] def __getitem__(self, item): return self.pages[item] def addPage(self, page): self.pages.append(page) def check_format(self, document): # convert all pages to page references pages = self.pages kids = PDFArray(pages) # make sure all pages are references kids.References(document) self.Kids = kids self.Count = len(pages) class PDFPage(PDFCatalog): __Comment__ = "Page dictionary" # all PDF attributes can be set explicitly # if this flag is set, the "usual" behavior will be suppressed Override_default_compilation = 0 __RefOnly__ = 1 __Defaults__ = {"Type": PDFName("Page"), # "Parent": PDFObjectReference(Pages), # no! use document.Pages } __NoDefault__ = """Parent MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ Trans""".split() __Refs__ = """Contents Parent ID""".split() pagewidth = 595 pageheight = 842 stream = None hasImages = 0 compression = 0 XObjects = None _colorsUsed = {} _shadingsUsed = {} Trans = None # transitionstring? # xobjects? # annotations def __init__(self): # set all nodefaults to None for name in self.__NoDefault__: setattr(self, name, None) def setCompression(self, onoff): self.compression = onoff def setStream(self, code): if self.Override_default_compilation: raise ValueError("overridden! 
must set stream explicitly") if isSeq(code): code = '\r\n'.join(code)+'\r\n' self.stream = code def setPageTransition(self, tranDict): self.Trans = PDFDictionary(tranDict) def check_format(self, document): # set up parameters unless usual behaviour is suppressed if self.Override_default_compilation: return self.MediaBox = self.MediaBox or PDFArray(self.Rotate in (90,270) and [0,0,self.pageheight,self.pagewidth] or [0, 0, self.pagewidth, self.pageheight]) if not self.Annots: self.Annots = None else: #print self.Annots #raise ValueError("annotations not reimplemented yet") if not isinstance(self.Annots,PDFObject): self.Annots = PDFArray(self.Annots) if not self.Contents: stream = self.stream if not stream: self.Contents = teststream() else: S = PDFStream() if self.compression: S.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress] S.content = stream S.__Comment__ = "page stream" self.Contents = S if not self.Resources: resources = PDFResourceDictionary() # fonts! resources.basicFonts() if self.hasImages: resources.allProcs() else: resources.basicProcs() if self.XObjects: #print "XObjects", self.XObjects.dict resources.XObject = self.XObjects if self.ExtGState: resources.ExtGState = self.ExtGState resources.setShading(self._shadingUsed) resources.setColorSpace(self._colorsUsed) self.Resources = resources if not self.Parent: pages = document.Pages self.Parent = document.Reference(pages) #this code contributed by Christian Jacobs <cljacobsen@gmail.com> class DuplicatePageLabelPage(Exception): pass class PDFPageLabels(PDFCatalog): __comment__ = None __RefOnly__ = 0 __Defaults__ = {} __NoDefault__ = ["Nums"] __Refs__ = [] def __init__(self): self.labels = [] def addPageLabel(self, page, label): self.labels.append((page, label)) def format(self, document): try: self.labels.sort() except DuplicatePageLabelPage: tmp = sorted([x[0] for x in self.labels]) annotateException('\n\n!!!!! 
Duplicate PageLabel seen for pages %r' % list(set([x for x in tmp if tmp.count(x)>1]))) labels = [] for page, label in self.labels: labels.append(page) labels.append(label) self.Nums = PDFArray(labels) #PDFArray makes a copy with list() return PDFCatalog.format(self, document) class PDFPageLabel(PDFCatalog): __Comment__ = None __RefOnly__ = 0 __Defaults__ = {} __NoDefault__ = "Type S P St".split() __convertible__ = 'ARABIC ROMAN_UPPER ROMAN_LOWER LETTERS_UPPER LETTERS_LOWER' ARABIC = 'D' ROMAN_UPPER = 'R' ROMAN_LOWER = 'r' LETTERS_UPPER = 'A' LETTERS_LOWER = 'a' def __init__(self, style=None, start=None, prefix=None): if style: if style.upper() in self.__convertible__: style = getattr(self,style.upper()) self.S = PDFName(style) if start: self.St = PDFnumber(start) if prefix: self.P = PDFString(prefix) def __lt__(self,oth): if rl_config.errorOnDuplicatePageLabelPage: raise DuplicatePageLabelPage() return False #ends code contributed by Christian Jacobs <cljacobsen@gmail.com> def testpage(document): P = PDFPage() P.Contents = teststream() pages = document.Pages P.Parent = document.Reference(pages) P.MediaBox = PDFArray([0, 0, 595, 841]) resources = PDFResourceDictionary() resources.allProcs() # enable all procsets resources.basicFonts() P.Resources = resources pages.addPage(P) #### DUMMY OUTLINES IMPLEMENTATION FOR testing DUMMYOUTLINE = """ << /Count 0 /Type /Outlines >>""" class PDFOutlines0(PDFObject): __Comment__ = "TEST OUTLINE!" 
text = DUMMYOUTLINE.replace("\n", '\r\n') __RefOnly__ = 1 def format(self, document): return pdfdocEnc(self.text) class OutlineEntryObject(PDFObject): Title = Dest = Parent = Prev = Next = First = Last = Count = None def format(self, document): D = {} D["Title"] = PDFString(self.Title) D["Parent"] = self.Parent D["Dest"] = self.Dest for n in ("Prev", "Next", "First", "Last", "Count"): v = getattr(self, n) if v is not None: D[n] = v PD = PDFDictionary(D) return PD.format(document) class PDFOutlines(PDFObject): # first attempt, many possible features missing. #no init for now mydestinations = ready = None counter = 0 currentlevel = -1 # ie, no levels yet def __init__(self): self.destinationnamestotitles = {} self.destinationstotitles = {} self.levelstack = [] self.buildtree = [] self.closedict = {} # dictionary of "closed" destinations in the outline def addOutlineEntry(self, destinationname, level=0, title=None, closed=None): if destinationname is None and level!=0: raise ValueError("close tree must have level of 0") if not isinstance(level,int): raise ValueError("level must be integer, got %s" % type(level)) if level<0: raise ValueError("negative levels not allowed") if title is None: title = destinationname currentlevel = self.currentlevel stack = self.levelstack tree = self.buildtree # adjust currentlevel and stack to match level if level>currentlevel: if level>currentlevel+1: raise ValueError("can't jump from outline level %s to level %s, need intermediates (destinationname=%r, title=%r)" %(currentlevel, level, destinationname, title)) level = currentlevel = currentlevel+1 stack.append([]) while level<currentlevel: current = stack[-1] del stack[-1] previous = stack[-1] lastinprevious = previous[-1] if isinstance(lastinprevious,tuple): (name, sectionlist) = lastinprevious raise ValueError("cannot reset existing sections: " + repr(lastinprevious)) else: name = lastinprevious sectionlist = current previous[-1] = (name, sectionlist) currentlevel = currentlevel-1 if 
destinationname is None: return stack[-1].append(destinationname) self.destinationnamestotitles[destinationname] = title if closed: self.closedict[destinationname] = 1 self.currentlevel = level def setDestinations(self, destinationtree): self.mydestinations = destinationtree def format(self, document): D = {} D["Type"] = PDFName("Outlines") c = self.count D["Count"] = c if c!=0: D["First"] = self.first D["Last"] = self.last PD = PDFDictionary(D) return PD.format(document) def setNames(self, canvas, *nametree): desttree = self.translateNames(canvas, nametree) self.setDestinations(desttree) def setNameList(self, canvas, nametree): desttree = self.translateNames(canvas, nametree) self.setDestinations(desttree) def translateNames(self, canvas, object): destinationnamestotitles = self.destinationnamestotitles destinationstotitles = self.destinationstotitles closedict = self.closedict if isStr(object): if not isUnicode(object): object = object.decode('utf8') destination = canvas._bookmarkReference(object) title = object if object in destinationnamestotitles: title = destinationnamestotitles[object] else: destinationnamestotitles[title] = title destinationstotitles[destination] = title if object in closedict: closedict[destination] = 1 return {object: canvas._bookmarkReference(object)} if isSeq(object): L = [] for o in object: L.append(self.translateNames(canvas, o)) if isinstance(object,tuple): return tuple(L) return L raise TypeError("in outline, destination name must be string: got a %s"%type(object)) def prepare(self, document, canvas): if self.mydestinations is None: if self.levelstack: self.addOutlineEntry(None) destnames = self.levelstack[0] self.mydestinations = self.translateNames(canvas, destnames) else: self.first = self.last = None self.count = 0 self.ready = 1 return self.count = count(self.mydestinations, self.closedict) (self.first, self.last) = self.maketree(document, self.mydestinations, toplevel=1) self.ready = 1 def maketree(self, document, 
destinationtree, Parent=None, toplevel=0): if toplevel: levelname = "Outline" Parent = document.Reference(document.Outlines) else: self.count = self.count+1 levelname = "Outline.%s" % self.count if Parent is None: raise ValueError("non-top level outline elt parent must be specified") if not isSeq(destinationtree): raise ValueError("destinationtree must be list or tuple, got %s") nelts = len(destinationtree) lastindex = nelts-1 lastelt = firstref = lastref = None destinationnamestotitles = self.destinationnamestotitles closedict = self.closedict for index in range(nelts): eltobj = OutlineEntryObject() eltobj.Parent = Parent eltname = "%s.%s" % (levelname, index) eltref = document.Reference(eltobj, eltname) if lastelt is not None: lastelt.Next = eltref eltobj.Prev = lastref if firstref is None: firstref = eltref lastref = eltref lastelt = eltobj lastref = eltref elt = destinationtree[index] if isinstance(elt,dict): leafdict = elt elif isinstance(elt,tuple): try: (leafdict, subsections) = elt except: raise ValueError("destination tree elt tuple should have two elts, got %s" % len(elt)) eltobj.Count = count(subsections, closedict) (eltobj.First, eltobj.Last) = self.maketree(document, subsections, eltref) else: raise ValueError("destination tree elt should be dict or tuple, got %s" % type(elt)) try: [(Title, Dest)] = list(leafdict.items()) except: raise ValueError("bad outline leaf dictionary, should have one entry "+bytestr(elt)) eltobj.Title = destinationnamestotitles[Title] eltobj.Dest = Dest if isinstance(elt,tuple) and Dest in closedict: eltobj.Count = -eltobj.Count return (firstref, lastref) def count(tree, closedict=None): from operator import add if isinstance(tree,tuple): (leafdict, subsections) = tree [(Title, Dest)] = list(leafdict.items()) if closedict and Dest in closedict: return 1 if isSeq(tree): counts = [] for e in tree: counts.append(count(e, closedict)) return sum(counts) return 1 class PDFInfo(PDFObject): producer = "ReportLab PDF Library - 
www.reportlab.com" creator = "ReportLab PDF Library - www.reportlab.com" title = "untitled" author = "anonymous" subject = "unspecified" keywords = "" _dateFormatter = None def __init__(self): self.invariant = rl_config.invariant self.trapped = 'False' def digest(self, md5object): for x in (self.title, self.author, self.subject, self.keywords): md5object.update(bytestr(x)) def format(self, document): D = {} D["Title"] = PDFString(self.title) D["Author"] = PDFString(self.author) D['ModDate'] = D["CreationDate"] = PDFDate(invariant=self.invariant,dateFormatter=self._dateFormatter) D["Producer"] = PDFString(self.producer) D["Creator"] = PDFString(self.creator) D["Subject"] = PDFString(self.subject) D["Keywords"] = PDFString(self.keywords) D["Trapped"] = PDFName(self.trapped) PD = PDFDictionary(D) return PD.format(document) def copy(self): thing = self.__klass__() for k, v in self.__dict__.items(): setattr(thing, k, v) return thing class Annotation(PDFObject): defaults = [("Type", PDFName("Annot"),)] required = ("Type", "Rect", "Contents", "Subtype") permitted = required+( "Border", "C", "T", "M", "F", "H", "BS", "AA", "AS", "Popup", "P", "AP") def cvtdict(self, d, escape=1): Rect = d["Rect"] if not isStr(Rect): d["Rect"] = PDFArray(Rect) d["Contents"] = PDFString(d["Contents"],escape) return d def AnnotationDict(self, **kw): if 'escape' in kw: escape = kw['escape'] del kw['escape'] else: escape = 1 d = {} for (name,val) in self.defaults: d[name] = val d.update(kw) for name in self.required: if name not in d: raise ValueError("keyword argument %s missing" % name) d = self.cvtdict(d,escape=escape) permitted = self.permitted for name in d.keys(): if name not in permitted: raise ValueError("bad annotation dictionary name %s" % name) return PDFDictionary(d) def Dict(self): raise ValueError("DictString undefined for virtual superclass Annotation, must overload") def format(self, document): D = self.Dict() return D.format(document) class TextAnnotation(Annotation): permitted 
= Annotation.permitted + ( "Open", "Name") def __init__(self, Rect, Contents, **kw): self.Rect = Rect self.Contents = Contents self.otherkw = kw def Dict(self): d = {} d.update(self.otherkw) d["Rect"] = self.Rect d["Contents"] = self.Contents d["Subtype"] = "/Text" return self.AnnotationDict(**d) class FreeTextAnnotation(Annotation): permitted = Annotation.permitted + ("DA",) def __init__(self, Rect, Contents, DA, **kw): self.Rect = Rect self.Contents = Contents self.DA = DA self.otherkw = kw def Dict(self): d = {} d.update(self.otherkw) d["Rect"] = self.Rect d["Contents"] = self.Contents d["DA"] = self.DA d["Subtype"] = "/FreeText" return self.AnnotationDict(**d) class LinkAnnotation(Annotation): permitted = Annotation.permitted + ( "Dest", "A", "PA") def __init__(self, Rect, Contents, Destination, Border="[0 0 1]", **kw): self.Border = Border self.Rect = Rect self.Contents = Contents self.Destination = Destination self.otherkw = kw def dummyDictString(self): return """ << /Type /Annot /Subtype /Link /Rect [71 717 190 734] /Border [16 16 1] /Dest [23 0 R /Fit] >> """ def Dict(self): d = {} d.update(self.otherkw) d["Border"] = self.Border d["Rect"] = self.Rect d["Contents"] = self.Contents d["Subtype"] = "/Link" d["Dest"] = self.Destination return self.AnnotationDict(**d) class HighlightAnnotation(Annotation): permitted = Annotation.permitted + ("QuadPoints", ) def __init__(self, Rect, Contents, QuadPoints, Color=[0.83, 0.89, 0.95], **kw): self.Rect = Rect self.Contents = Contents self.otherkw = kw self.QuadPoints = QuadPoints self.Color = Color def cvtdict(self, d, escape=1): Rect = d["Rect"] Quad = d["QuadPoints"] Color = d["C"] if not isinstance(Rect, str): d["Rect"] = PDFArray(Rect).format(d, IND=b" ") if not isinstance(Quad, str): d["QuadPoints"] = PDFArray(Quad).format(d, IND=b" ") if not isinstance(Color, str): d["C"] = PDFArray(Color).format(d, IND=b" ") d["Contents"] = PDFString(d["Contents"], escape) return d def Dict(self): d = {} d.update(self.otherkw) 
d["Rect"] = self.Rect d["Contents"] = self.Contents d["Subtype"] = "/Highlight" d["QuadPoints"] = self.QuadPoints d["C"] = self.Color return self.AnnotationDict(**d) def rect_to_quad(Rect): return [Rect[0], Rect[1], Rect[2], Rect[1], Rect[0], Rect[3], Rect[2], Rect[3]] class PDFRectangle(PDFObject): def __init__(self, llx, lly, urx, ury): self.llx, self.lly, self.ulx, self.ury = llx, lly, urx, ury def format(self, document): A = PDFArray([self.llx, self.lly, self.ulx, self.ury]) return format(A, document) _NOWT=None def _getTimeStamp(): global _NOWT if not _NOWT: import time _NOWT = time.time() return _NOWT class PDFDate(PDFObject): def __init__(self, invariant=rl_config.invariant, dateFormatter=None): if invariant: now = (2000,1,1,0,0,0,0) self.dhh = 0 self.dmm = 0 else: import time now = tuple(time.localtime(_getTimeStamp())[:6]) from time import timezone self.dhh = int(timezone / (3600.0)) self.dmm = (timezone % 3600) % 60 self.date = now[:6] self.dateFormatter = dateFormatter def format(self, doc): dfmt = self.dateFormatter or ( lambda yyyy,mm,dd,hh,m,s: "D:%04d%02d%02d%02d%02d%02d%+03d'%02d'" % (yyyy,mm,dd,hh,m,s,self.dhh,self.dmm)) return format(PDFString(dfmt(*self.date)), doc) class Destination(PDFObject): representation = format = page = None def __init__(self,name): self.name = name self.fmt = self.page = None def format(self, document): f = self.fmt if f is None: raise ValueError("format not resolved, probably missing URL scheme or undefined destination target for '%s'" % self.name) p = self.page if p is None: raise ValueError("Page not bound, probably missing URL scheme or undefined destination target for '%s'" % self.name) f.page = p return f.format(document) def xyz(self, left, top, zoom): self.fmt = PDFDestinationXYZ(None, left, top, zoom) def fit(self): self.fmt = PDFDestinationFit(None) def fitb(self): self.fmt = PDFDestinationFitB(None) def fith(self, top): self.fmt = PDFDestinationFitH(None,top) def fitv(self, left): self.fmt = 
PDFDestinationFitV(None, left) def fitbh(self, top): self.fmt = PDFDestinationFitBH(None, top) def fitbv(self, left): self.fmt = PDFDestinationFitBV(None, left) def fitr(self, left, bottom, right, top): self.fmt = PDFDestinationFitR(None, left, bottom, right, top) def setPage(self, page): self.page = page (PDFObject): typename = "XYZ" def __init__(self, page, left, top, zoom): self.page = page self.top = top self.zoom = zoom self.left = left def format(self, document): pageref = document.Reference(self.page) A = PDFArray( [ pageref, PDFName(self.typename), self.left, self.top, self.zoom ] ) return format(A, document) class PDFDestinationFit(PDFObject): typename = "Fit" def __init__(self, page): self.page = page def format(self, document): pageref = document.Reference(self.page) A = PDFArray( [ pageref, PDFName(self.typename) ] ) return format(A, document) class PDFDestinationFitB(PDFDestinationFit): typename = "FitB" class PDFDestinationFitH(PDFObject): typename = "FitH" def __init__(self, page, top): self.page = page; self.top=top def format(self, document): pageref = document.Reference(self.page) A = PDFArray( [ pageref, PDFName(self.typename), self.top ] ) return format(A, document) class PDFDestinationFitBH(PDFDestinationFitH): typename = "FitBH" class PDFDestinationFitV(PDFObject): typename = "FitV" def __init__(self, page, left): self.page = page; self.left=left def format(self, document): pageref = document.Reference(self.page) A = PDFArray( [ pageref, PDFName(self.typename), self.left ] ) return format(A, document) class PDFDestinationFitBV(PDFDestinationFitV): typename = "FitBV" class PDFDestinationFitR(PDFObject): typename = "FitR" def __init__(self, page, left, bottom, right, top): self.page = page; self.left=left; self.bottom=bottom; self.right=right; self.top=top def format(self, document): pageref = document.Reference(self.page) A = PDFArray( [ pageref, PDFName(self.typename), self.left, self.bottom, self.right, self.top] ) return format(A, document) 
class PDFResourceDictionary(PDFObject): def __init__(self): self.ColorSpace = {} self.XObject = {} self.ExtGState = {} self.Font = {} self.Pattern = {} self.ProcSet = [] self.Properties = {} self.Shading = {} self.basicProcs() stdprocs = [PDFName(s) for s in "PDF Text ImageB ImageC ImageI".split()] dict_attributes = ("ColorSpace", "XObject", "ExtGState", "Font", "Pattern", "Properties", "Shading") def allProcs(self): self.ProcSet = self.stdprocs def basicProcs(self): self.ProcSet = self.stdprocs[:2] def basicFonts(self): self.Font = PDFObjectReference(BasicFonts) def setColorSpace(self,colorsUsed): for c,s in colorsUsed.items(): self.ColorSpace[s] = PDFObjectReference(c) def setShading(self,shadingUsed): for c,s in shadingUsed.items(): self.Shading[s] = PDFObjectReference(c) def format(self, document): D = {} for dname in self.dict_attributes: v = getattr(self, dname) if isinstance(v,dict): if v: dv = PDFDictionary(v) D[dname] = dv else: D[dname] = v v = self.ProcSet dname = "ProcSet" if isSeq(v): if v: dv = PDFArray(v) D[dname] = dv else: D[dname] = v DD = PDFDictionary(D) return format(DD, document) ', '/G':'DeviceGray', '/CMYK':'DeviceCMYK'}[words[7]] self.bitsPerComponent = 8 self._filters = 'FlateDecode', #'Fl' if IMG: self._checkTransparency(IMG[0]) elif self.mask=='auto': self.mask = None self.streamContent = ''.join(imagedata[3:-1]) def _checkTransparency(self,im): if self.mask=='auto': if im._dataA: self.mask = None self._smask = PDFImageXObject(_digester(im._dataA.getRGBData()),im._dataA,mask=None) self._smask._decode = [0,1] else: tc = im.getTransparent() if tc: self.mask = (tc[0], tc[0], tc[1], tc[1], tc[2], tc[2]) else: self.mask = None elif hasattr(self.mask,'rgb'): _ = self.mask.rgb() self.mask = _[0],_[0],_[1],_[1],_[2],_[2] def loadImageFromSRC(self, im): fp = im.jpeg_fh() if fp: self.loadImageFromJPEG(fp) else: zlib = import_zlib() if not zlib: return self.width, self.height = im.getSize() raw = im.getRGBData() #assert len(raw) == 
self.width*self.height, "Wrong amount of data for image expected %sx%s=%s got %s" % (self.width,self.height,self.width*self.height,len(raw)) self.streamContent = zlib.compress(raw) if rl_config.useA85: self.streamContent = asciiBase85Encode(self.streamContent) self._filters = 'ASCII85Decode','FlateDecode' #'A85','Fl' else: self._filters = 'FlateDecode', #'Fl' self.colorSpace= _mode2CS[im.mode] self.bitsPerComponent = 8 self._checkTransparency(im) def format(self, document): S = PDFStream(content = self.streamContent) dict = S.dictionary dict["Type"] = PDFName("XObject") dict["Subtype"] = PDFName("Image") dict["Width"] = self.width dict["Height"] = self.height dict["BitsPerComponent"] = self.bitsPerComponent dict["ColorSpace"] = PDFName(self.colorSpace) if self.colorSpace=='DeviceCMYK' and getattr(self,'_dotrans',0): dict["Decode"] = PDFArray([1,0,1,0,1,0,1,0]) elif getattr(self,'_decode',None): dict["Decode"] = PDFArray(self._decode) dict["Filter"] = PDFArray(map(PDFName,self._filters)) dict["Length"] = len(self.streamContent) if self.mask: dict["Mask"] = PDFArray(self.mask) if getattr(self,'smask',None): dict["SMask"] = self.smask return S.format(document) class PDFSeparationCMYKColor: def __init__(self, cmyk): from reportlab.lib.colors import CMYKColor if not isinstance(cmyk,CMYKColor): raise ValueError('%s needs a CMYKColor argument' % self.__class__.__name__) elif not cmyk.spotName: raise ValueError('%s needs a CMYKColor argument with a spotName' % self.__class__.__name__) self.cmyk = cmyk def _makeFuncPS(self): R = [].append for i,v in enumerate(self.cmyk.cmyk()): v=float(v) if i==3: if v==0.0: R('pop') R('0.0') else: R(str(v)) R('mul') else: if v==0: R('0.0') else: R('dup') R(str(v)) R('mul') R('exch') return '{%s}' % (' '.join(R.__self__)) def value(self): return PDFArrayCompact(( PDFName('Separation'), PDFName(self.cmyk.spotName), PDFName('DeviceCMYK'), PDFStream( dictionary=PDFDictionary(dict( FunctionType=4, Domain=PDFArrayCompact((0,1)), 
Range=PDFArrayCompact((0,1,0,1,0,1,0,1)) )), content=self._makeFuncPS(), filters=None,#[PDFBase85Encode, PDFZCompress], ) )) class PDFFunction(PDFObject): defaults = [] required = ("FunctionType", "Domain") permitted = required+("Range",) def FunctionDict(self, **kw): d = {} for (name,val) in self.defaults: d[name] = val d.update(kw) for name in self.required: if name not in d: raise ValueError("keyword argument %s missing" % name) permitted = self.permitted for name in d.keys(): if name not in permitted: raise ValueError("bad annotation dictionary name %s" % name) return PDFDictionary(d) def Dict(self, document): raise ValueError("Dict undefined for virtual superclass PDFShading, must overload") # but usually #return self.FunctionDict(self, ...) def format(self, document): D = self.Dict(document) return D.format(document) class PDFExponentialFunction(PDFFunction): defaults = PDFFunction.defaults + [("Domain", PDFArrayCompact((0.0, 1.0)))] required = PDFFunction.required + ("N",) permitted = PDFFunction.permitted + ("C0", "C1", "N") def __init__(self, C0, C1, N, **kw): self.C0 = C0 self.C1 = C1 self.N = N self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["FunctionType"] = 2 d["C0"] = PDFArrayCompact(self.C0) d["C1"] = PDFArrayCompact(self.C1) d["N"] = self.N return self.FunctionDict(**d) class PDFStitchingFunction(PDFFunction): required = PDFFunction.required + ("Functions", "Bounds", "Encode") permitted = PDFFunction.permitted + ("Functions", "Bounds", "Encode") def __init__(self, Functions, Bounds, Encode, **kw): self.Functions = Functions self.Bounds = Bounds self.Encode = Encode self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["FunctionType"] = 3 d["Functions"] = PDFArray([document.Reference(x) for x in self.Functions]) d["Bounds"] = PDFArray(self.Bounds) d["Encode"] = PDFArray(self.Encode) return self.FunctionDict(**d) class PDFShading(PDFObject): required = ("ShadingType", "ColorSpace") permitted = 
required+("Background", "BBox", "AntiAlias") def ShadingDict(self, **kw): d = {} d.update(kw) for name in self.required: if name not in d: raise ValueError("keyword argument %s missing" % name) permitted = self.permitted for name in d.keys(): if name not in permitted: raise ValueError("bad annotation dictionary name %s" % name) return PDFDictionary(d) def Dict(self, document): raise ValueError("Dict undefined for virtual superclass PDFShading, must overload") # but usually #return self.ShadingDict(self, ...) def format(self, document): D = self.Dict(document) return D.format(document) class PDFFunctionShading(PDFShading): required = PDFShading.required + ("Function",) permitted = PDFShading.permitted + ("Domain", "Matrix", "Function") def __init__(self, Function, ColorSpace, **kw): self.Function = Function self.ColorSpace = ColorSpace self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["ShadingType"] = 1 d["ColorSpace"] = PDFName(self.ColorSpace) d["Function"] = document.Reference(self.Function) return self.ShadingDict(**d) class PDFAxialShading(PDFShading): required = PDFShading.required + ("Coords", "Function") permitted = PDFShading.permitted + ( "Coords", "Domain", "Function", "Extend") def __init__(self, x0, y0, x1, y1, Function, ColorSpace, **kw): self.Coords = (x0, y0, x1, y1) self.Function = Function self.ColorSpace = ColorSpace self.otherkw = kw def Dict(self, document): d = {} d.update(self.otherkw) d["ShadingType"] = 2 d["ColorSpace"] = PDFName(self.ColorSpace) d["Coords"] = PDFArrayCompact(self.Coords) d["Function"] = document.Reference(self.Function) return self.ShadingDict(**d) class PDFRadialShading(PDFShading): required = PDFShading.required + ("Coords", "Function") permitted = PDFShading.permitted + ( "Coords", "Domain", "Function", "Extend") def __init__(self, x0, y0, r0, x1, y1, r1, Function, ColorSpace, **kw): self.Coords = (x0, y0, r0, x1, y1, r1) self.Function = Function self.ColorSpace = ColorSpace self.otherkw = kw 
def Dict(self, document): d = {} d.update(self.otherkw) d["ShadingType"] = 3 d["ColorSpace"] = PDFName(self.ColorSpace) d["Coords"] = PDFArrayCompact(self.Coords) d["Function"] = document.Reference(self.Function) return self.ShadingDict(**d) if __name__=="__main__": print("There is no script interpretation for pdfdoc.")
true
true
1c45ebcafc988d6417656fc2f57c14e952094419
591
py
Python
kombu/asynchronous/http/__init__.py
kaiix/kombu
580b5219cc50cad278c4b664d0e0f85e37a5e9ea
[ "BSD-3-Clause" ]
1,920
2015-01-03T15:43:23.000Z
2022-03-30T19:30:35.000Z
kombu/asynchronous/http/__init__.py
kaiix/kombu
580b5219cc50cad278c4b664d0e0f85e37a5e9ea
[ "BSD-3-Clause" ]
949
2015-01-02T18:56:00.000Z
2022-03-31T23:14:59.000Z
kombu/asynchronous/http/__init__.py
kaiix/kombu
580b5219cc50cad278c4b664d0e0f85e37a5e9ea
[ "BSD-3-Clause" ]
833
2015-01-07T23:56:35.000Z
2022-03-31T22:04:11.000Z
from kombu.asynchronous import get_event_loop from .base import Headers, Request, Response __all__ = ('Client', 'Headers', 'Response', 'Request') def Client(hub=None, **kwargs): """Create new HTTP client.""" from .curl import CurlClient return CurlClient(hub, **kwargs) def get_client(hub=None, **kwargs): """Get or create HTTP client bound to the current event loop.""" hub = hub or get_event_loop() try: return hub._current_http_client except AttributeError: client = hub._current_http_client = Client(hub, **kwargs) return client
26.863636
68
0.685279
from kombu.asynchronous import get_event_loop from .base import Headers, Request, Response __all__ = ('Client', 'Headers', 'Response', 'Request') def Client(hub=None, **kwargs): from .curl import CurlClient return CurlClient(hub, **kwargs) def get_client(hub=None, **kwargs): hub = hub or get_event_loop() try: return hub._current_http_client except AttributeError: client = hub._current_http_client = Client(hub, **kwargs) return client
true
true
1c45ec2cae11560444aa0d63d936a8e946da8104
1,851
py
Python
setup.py
lassejaco/pretix-eth-payment-plugin
be514a7387de8399cb11c9dd8971f286ccc9a72c
[ "Apache-2.0" ]
null
null
null
setup.py
lassejaco/pretix-eth-payment-plugin
be514a7387de8399cb11c9dd8971f286ccc9a72c
[ "Apache-2.0" ]
null
null
null
setup.py
lassejaco/pretix-eth-payment-plugin
be514a7387de8399cb11c9dd8971f286ccc9a72c
[ "Apache-2.0" ]
null
null
null
import os from distutils.command.build import build # type: ignore from setuptools import setup, find_packages with open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8') as f: long_description = f.read() class CustomBuild(build): def run(self): from django.core import management management.call_command('compilemessages', verbosity=1) build.run(self) cmdclass = { 'build': CustomBuild } extras_require = { 'test': [ 'pytest>=5.1,<6', 'pytest-django>=3.5,<4', ], 'lint': [ 'flake8>=3.7,<4', 'mypy==0.720', ], 'dev': [ 'tox>=3.14.5,<4', ], } extras_require['dev'] = ( extras_require['dev'] + extras_require['test'] + extras_require['lint'] ) setup( name='pretix-eth-payment-plugin', version='2.0.4-dev', description='Ethereum payment provider plugin for pretix software', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/esPass/pretix-eth-payment-plugin', author='Pretix Ethereum Plugin Developers', author_email='pretix-eth-payment-plugin@ethereum.org', license='Apache Software License', install_requires=[ "pretix>=3.8.0", "web3>=5.7.0", "eth-abi>=2.1.1,<3", "eth-typing>=2.2.1,<3", "eth-utils>=1.8.4,<2", "eth-hash[pycryptodome]>=0.3.1,<0.4", # Requests requires urllib3 <1.26.0. Can delete this later after # requests gets its act together. "urllib3<1.26.0", ], python_requires='>=3.6, <4', extras_require=extras_require, packages=find_packages(exclude=['tests', 'tests.*']), include_package_data=True, cmdclass=cmdclass, entry_points=""" [pretix.plugin] pretix_eth=pretix_eth:PretixPluginMeta """, )
24.68
87
0.622907
import os from distutils.command.build import build from setuptools import setup, find_packages with open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8') as f: long_description = f.read() class CustomBuild(build): def run(self): from django.core import management management.call_command('compilemessages', verbosity=1) build.run(self) cmdclass = { 'build': CustomBuild } extras_require = { 'test': [ 'pytest>=5.1,<6', 'pytest-django>=3.5,<4', ], 'lint': [ 'flake8>=3.7,<4', 'mypy==0.720', ], 'dev': [ 'tox>=3.14.5,<4', ], } extras_require['dev'] = ( extras_require['dev'] + extras_require['test'] + extras_require['lint'] ) setup( name='pretix-eth-payment-plugin', version='2.0.4-dev', description='Ethereum payment provider plugin for pretix software', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/esPass/pretix-eth-payment-plugin', author='Pretix Ethereum Plugin Developers', author_email='pretix-eth-payment-plugin@ethereum.org', license='Apache Software License', install_requires=[ "pretix>=3.8.0", "web3>=5.7.0", "eth-abi>=2.1.1,<3", "eth-typing>=2.2.1,<3", "eth-utils>=1.8.4,<2", "eth-hash[pycryptodome]>=0.3.1,<0.4", "urllib3<1.26.0", ], python_requires='>=3.6, <4', extras_require=extras_require, packages=find_packages(exclude=['tests', 'tests.*']), include_package_data=True, cmdclass=cmdclass, entry_points=""" [pretix.plugin] pretix_eth=pretix_eth:PretixPluginMeta """, )
true
true
1c45ef8254822d3c204624c76142cdc54dcca2e2
457
py
Python
dedomeno/houses/migrations/0098_auto_20170117_1650.py
ginopalazzo/dedomeno
e43df365849102016c8819b2082d2cde9109360f
[ "MIT" ]
38
2018-03-19T12:52:17.000Z
2022-02-17T14:45:57.000Z
dedomeno/houses/migrations/0098_auto_20170117_1650.py
ginopalazzo/dedomeno
e43df365849102016c8819b2082d2cde9109360f
[ "MIT" ]
7
2020-02-11T23:01:40.000Z
2020-08-06T13:30:58.000Z
dedomeno/houses/migrations/0098_auto_20170117_1650.py
ginopalazzo/dedomeno
e43df365849102016c8819b2082d2cde9109360f
[ "MIT" ]
12
2019-02-23T22:10:34.000Z
2022-03-24T12:01:38.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.10.4 on 2017-01-17 15:50 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('houses', '0097_property_online'), ] operations = [ migrations.AlterField( model_name='realestate', name='desc', field=models.TextField(blank=True, null=True), ), ]
21.761905
58
0.61488
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('houses', '0097_property_online'), ] operations = [ migrations.AlterField( model_name='realestate', name='desc', field=models.TextField(blank=True, null=True), ), ]
true
true
1c45f025c99e2b8c21037589076efa5e71227813
17,668
py
Python
graphsage/unsupervised_train.py
LiTszOn/GraphSAGE
dbeb50d52e8d242b3c4ad3e4264c168a2c406e70
[ "MIT" ]
null
null
null
graphsage/unsupervised_train.py
LiTszOn/GraphSAGE
dbeb50d52e8d242b3c4ad3e4264c168a2c406e70
[ "MIT" ]
null
null
null
graphsage/unsupervised_train.py
LiTszOn/GraphSAGE
dbeb50d52e8d242b3c4ad3e4264c168a2c406e70
[ "MIT" ]
null
null
null
from __future__ import division from __future__ import print_function import os import time import tensorflow as tf import numpy as np from graphsage.models import SampleAndAggregate, SAGEInfo, Node2VecModel from graphsage.minibatch import EdgeMinibatchIterator from graphsage.neigh_samplers import UniformNeighborSampler from graphsage.utils import load_data os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # Set random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) # Settings flags = tf.app.flags FLAGS = flags.FLAGS tf.app.flags.DEFINE_boolean('log_device_placement', False, """Whether to log device placement.""") #core params.. flags.DEFINE_string('model', 'graphsage', 'model names. See README for possible values.') flags.DEFINE_float('learning_rate', 0.00001, 'initial learning rate.') flags.DEFINE_string("model_size", "small", "Can be big or small; model specific def'ns") flags.DEFINE_string('train_prefix', '', 'name of the object file that stores the training data. must be specified.') # left to default values in main experiments flags.DEFINE_integer('epochs', 1, 'number of epochs to train.') flags.DEFINE_float('dropout', 0.0, 'dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 0.0, 'weight for l2 loss on embedding matrix.') flags.DEFINE_integer('max_degree', 100, 'maximum node degree.') flags.DEFINE_integer('samples_1', 25, 'number of samples in layer 1') flags.DEFINE_integer('samples_2', 10, 'number of users samples in layer 2') flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)') flags.DEFINE_integer('dim_2', 128, 'Size of output dim (final is 2x this, if using concat)') flags.DEFINE_boolean('random_context', True, 'Whether to use random context or direct edges') flags.DEFINE_integer('neg_sample_size', 20, 'number of negative samples') flags.DEFINE_integer('batch_size', 512, 'minibatch size.') flags.DEFINE_integer('n2v_test_epochs', 1, 'Number of new SGD epochs for n2v.') 
flags.DEFINE_integer('identity_dim', 0, 'Set to positive value to use identity embedding features of that dimension. Default 0.') #logging, saving, validation settings etc. flags.DEFINE_boolean('save_embeddings', True, 'whether to save embeddings for all nodes after training') flags.DEFINE_string('base_log_dir', '.', 'base directory for logging and saving embeddings') flags.DEFINE_integer('validate_iter', 5000, "how often to run a validation minibatch.") flags.DEFINE_integer('validate_batch_size', 256, "how many nodes per validation sample.") flags.DEFINE_integer('gpu', 1, "which gpu to use.") flags.DEFINE_integer('print_every', 50, "How often to print training info.") flags.DEFINE_integer('max_total_steps', 10**10, "Maximum total number of iterations") os.environ["CUDA_VISIBLE_DEVICES"]=str(FLAGS.gpu) GPU_MEM_FRACTION = 0.8 def log_dir(): log_dir = FLAGS.base_log_dir + "/unsup-" + FLAGS.train_prefix.split("/")[-2] log_dir += "/{model:s}_{model_size:s}_{lr:0.6f}/".format( model=FLAGS.model, model_size=FLAGS.model_size, lr=FLAGS.learning_rate) if not os.path.exists(log_dir): os.makedirs(log_dir) return log_dir # Define model evaluation function def evaluate(sess, model, minibatch_iter, size=None): t_test = time.time() feed_dict_val = minibatch_iter.val_feed_dict(size) outs_val = sess.run([model.loss, model.ranks, model.mrr], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], outs_val[2], (time.time() - t_test) def incremental_evaluate(sess, model, minibatch_iter, size): t_test = time.time() finished = False val_losses = [] val_mrrs = [] iter_num = 0 while not finished: feed_dict_val, finished, _ = minibatch_iter.incremental_val_feed_dict(size, iter_num) iter_num += 1 outs_val = sess.run([model.loss, model.ranks, model.mrr], feed_dict=feed_dict_val) val_losses.append(outs_val[0]) val_mrrs.append(outs_val[2]) return np.mean(val_losses), np.mean(val_mrrs), (time.time() - t_test) def save_val_embeddings(sess, model, minibatch_iter, size, out_dir, mod=""): 
val_embeddings = [] finished = False seen = set([]) nodes = [] iter_num = 0 name = "val" while not finished: feed_dict_val, finished, edges = minibatch_iter.incremental_embed_feed_dict(size, iter_num) iter_num += 1 outs_val = sess.run([model.loss, model.mrr, model.outputs1], feed_dict=feed_dict_val) #ONLY SAVE FOR embeds1 because of planetoid for i, edge in enumerate(edges): if not edge[0] in seen: val_embeddings.append(outs_val[-1][i,:]) nodes.append(edge[0]) seen.add(edge[0]) if not os.path.exists(out_dir): os.makedirs(out_dir) val_embeddings = np.vstack(val_embeddings) np.save(out_dir + name + mod + ".npy", val_embeddings) with open(out_dir + name + mod + ".txt", "w") as fp: fp.write("\n".join(map(str,nodes))) def construct_placeholders(): # Define placeholders placeholders = { 'batch1' : tf.placeholder(tf.int32, shape=(None), name='batch1'), 'batch2' : tf.placeholder(tf.int32, shape=(None), name='batch2'), # negative samples for all nodes in the batch 'neg_samples': tf.placeholder(tf.int32, shape=(None,), name='neg_sample_size'), 'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'), 'batch_size' : tf.placeholder(tf.int32, name='batch_size'), } return placeholders def train(train_data, test_data=None): G = train_data[0] features = train_data[1] id_map = train_data[2] print("G: " + str(G)) print("features: " + str(features)) print("id_map: " + str(id_map)) if not features is None: # pad with dummy zero vector features = np.vstack([features, np.zeros((features.shape[1],))]) context_pairs = train_data[3] if FLAGS.random_context else None placeholders = construct_placeholders() #returns a dictionary of placeholder minibatch = EdgeMinibatchIterator(G, #produce a bunch of minibatch id_map, placeholders, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, num_neg_samples=FLAGS.neg_sample_size, context_pairs = context_pairs) #a useful object adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape) adj_info = tf.Variable(adj_info_ph, 
trainable=False, name="adj_info") # with tf.Session() as sess: # sess.run(tf.global_variables_initializer()) # sess.run(tf.local_variables_initializer()) # print("adj_info: " + str(sess.run(adj_info))) if FLAGS.model == 'graphsage_mean': # Create model sampler = UniformNeighborSampler(adj_info)#to wrap the lookup function layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, model_size=FLAGS.model_size, identity_dim = FLAGS.identity_dim, logging=True) #set training parameters and define loss function etc elif FLAGS.model == 'gcn': # Create model sampler = UniformNeighborSampler(adj_info) layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, 2*FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, 2*FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type="gcn", model_size=FLAGS.model_size, identity_dim = FLAGS.identity_dim, concat=False, logging=True) elif FLAGS.model == 'graphsage_seq': sampler = UniformNeighborSampler(adj_info) layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, identity_dim = FLAGS.identity_dim, aggregator_type="seq", model_size=FLAGS.model_size, logging=True) elif FLAGS.model == 'graphsage_maxpool': sampler = UniformNeighborSampler(adj_info) layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type="maxpool", model_size=FLAGS.model_size, identity_dim = FLAGS.identity_dim, logging=True) elif FLAGS.model == 'graphsage_meanpool': sampler 
= UniformNeighborSampler(adj_info) layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type="meanpool", model_size=FLAGS.model_size, identity_dim = FLAGS.identity_dim, logging=True) elif FLAGS.model == 'n2v': model = Node2VecModel(placeholders, features.shape[0], minibatch.deg, #2x because graphsage uses concat nodevec_dim=2*FLAGS.dim_1, lr=FLAGS.learning_rate) else: raise Exception('Error: model name unrecognized.') config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement) config.gpu_options.allow_growth = True #config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION config.allow_soft_placement = True # Initialize session sess = tf.Session(config=config) merged = tf.summary.merge_all() summary_writer = tf.summary.FileWriter(log_dir(), sess.graph) # Init variables sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj}) # Train model train_shadow_mrr = None shadow_mrr = None total_steps = 0 avg_time = 0.0 epoch_val_costs = [] train_adj_info = tf.assign(adj_info, minibatch.adj) val_adj_info = tf.assign(adj_info, minibatch.test_adj) for epoch in range(FLAGS.epochs): minibatch.shuffle() iter = 0 print('Epoch: %04d' % (epoch + 1)) epoch_val_costs.append(0) while not minibatch.end(): # Construct feed dictionary feed_dict = minibatch.next_minibatch_feed_dict() feed_dict.update({placeholders['dropout']: FLAGS.dropout}) t = time.time() # Training step outs = sess.run([merged, model.opt_op, model.loss, model.ranks, model.aff_all, model.mrr, model.outputs1], feed_dict=feed_dict) train_cost = outs[2] train_mrr = outs[5] if train_shadow_mrr is None: train_shadow_mrr = train_mrr# else: train_shadow_mrr -= (1-0.99) * (train_shadow_mrr - train_mrr) if iter % FLAGS.validate_iter == 0: # Validation sess.run(val_adj_info.op) val_cost, ranks, 
val_mrr, duration = evaluate(sess, model, minibatch, size=FLAGS.validate_batch_size) sess.run(train_adj_info.op) epoch_val_costs[-1] += val_cost if shadow_mrr is None: shadow_mrr = val_mrr else: shadow_mrr -= (1-0.99) * (shadow_mrr - val_mrr) if total_steps % FLAGS.print_every == 0: summary_writer.add_summary(outs[0], total_steps) # Print results avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1) if total_steps % FLAGS.print_every == 0: print("Iter:", '%04d' % iter, "train_loss=", "{:.5f}".format(train_cost), "train_mrr=", "{:.5f}".format(train_mrr),#Mean reciprocal rank "train_mrr_ema=", "{:.5f}".format(train_shadow_mrr), # exponential moving average "val_loss=", "{:.5f}".format(val_cost), "val_mrr=", "{:.5f}".format(val_mrr), "val_mrr_ema=", "{:.5f}".format(shadow_mrr), # exponential moving average "time=", "{:.5f}".format(avg_time)) iter += 1 total_steps += 1 if total_steps > FLAGS.max_total_steps: break if total_steps > FLAGS.max_total_steps: break print("Optimization Finished!") if FLAGS.save_embeddings: sess.run(val_adj_info.op) save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir()) if FLAGS.model == "n2v": # stopping the gradient for the already trained nodes train_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if not G.node[n]['val'] and not G.node[n]['test']], dtype=tf.int32) test_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if G.node[n]['val'] or G.node[n]['test']], dtype=tf.int32) update_nodes = tf.nn.embedding_lookup(model.context_embeds, tf.squeeze(test_ids)) no_update_nodes = tf.nn.embedding_lookup(model.context_embeds,tf.squeeze(train_ids)) update_nodes = tf.scatter_nd(test_ids, update_nodes, tf.shape(model.context_embeds)) no_update_nodes = tf.stop_gradient(tf.scatter_nd(train_ids, no_update_nodes, tf.shape(model.context_embeds))) model.context_embeds = update_nodes + no_update_nodes sess.run(model.context_embeds) # run random walks from graphsage.utils import run_random_walks nodes 
= [n for n in G.nodes_iter() if G.node[n]["val"] or G.node[n]["test"]] start_time = time.time() pairs = run_random_walks(G, nodes, num_walks=50) walk_time = time.time() - start_time test_minibatch = EdgeMinibatchIterator(G, id_map, placeholders, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, num_neg_samples=FLAGS.neg_sample_size, context_pairs = pairs, n2v_retrain=True, fixed_n2v=True) start_time = time.time() print("Doing test training for n2v.") test_steps = 0 for epoch in range(FLAGS.n2v_test_epochs): test_minibatch.shuffle() while not test_minibatch.end(): feed_dict = test_minibatch.next_minibatch_feed_dict() feed_dict.update({placeholders['dropout']: FLAGS.dropout}) outs = sess.run([model.opt_op, model.loss, model.ranks, model.aff_all, model.mrr, model.outputs1], feed_dict=feed_dict) if test_steps % FLAGS.print_every == 0: print("Iter:", '%04d' % test_steps, "train_loss=", "{:.5f}".format(outs[1]), "train_mrr=", "{:.5f}".format(outs[-2])) test_steps += 1 train_time = time.time() - start_time save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir(), mod="-test") print("Total time: ", train_time+walk_time) print("Walk time: ", walk_time) print("Train time: ", train_time) def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, load_walks=True) # for processing Redit's data (Reddit's data is a bit wired) print("Done loading training data..") train(train_data) if __name__ == '__main__': tf.app.run()
45.302564
129
0.586258
from __future__ import division from __future__ import print_function import os import time import tensorflow as tf import numpy as np from graphsage.models import SampleAndAggregate, SAGEInfo, Node2VecModel from graphsage.minibatch import EdgeMinibatchIterator from graphsage.neigh_samplers import UniformNeighborSampler from graphsage.utils import load_data os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" seed = 123 np.random.seed(seed) tf.set_random_seed(seed) flags = tf.app.flags FLAGS = flags.FLAGS tf.app.flags.DEFINE_boolean('log_device_placement', False, """Whether to log device placement.""") flags.DEFINE_string('model', 'graphsage', 'model names. See README for possible values.') flags.DEFINE_float('learning_rate', 0.00001, 'initial learning rate.') flags.DEFINE_string("model_size", "small", "Can be big or small; model specific def'ns") flags.DEFINE_string('train_prefix', '', 'name of the object file that stores the training data. must be specified.') # left to default values in main experiments flags.DEFINE_integer('epochs', 1, 'number of epochs to train.') flags.DEFINE_float('dropout', 0.0, 'dropout rate (1 - keep probability).') flags.DEFINE_float('weight_decay', 0.0, 'weight for l2 loss on embedding matrix.') flags.DEFINE_integer('max_degree', 100, 'maximum node degree.') flags.DEFINE_integer('samples_1', 25, 'number of samples in layer 1') flags.DEFINE_integer('samples_2', 10, 'number of users samples in layer 2') flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)') flags.DEFINE_integer('dim_2', 128, 'Size of output dim (final is 2x this, if using concat)') flags.DEFINE_boolean('random_context', True, 'Whether to use random context or direct edges') flags.DEFINE_integer('neg_sample_size', 20, 'number of negative samples') flags.DEFINE_integer('batch_size', 512, 'minibatch size.') flags.DEFINE_integer('n2v_test_epochs', 1, 'Number of new SGD epochs for n2v.') flags.DEFINE_integer('identity_dim', 0, 'Set to positive 
value to use identity embedding features of that dimension. Default 0.') #logging, saving, validation settings etc. flags.DEFINE_boolean('save_embeddings', True, 'whether to save embeddings for all nodes after training') flags.DEFINE_string('base_log_dir', '.', 'base directory for logging and saving embeddings') flags.DEFINE_integer('validate_iter', 5000, "how often to run a validation minibatch.") flags.DEFINE_integer('validate_batch_size', 256, "how many nodes per validation sample.") flags.DEFINE_integer('gpu', 1, "which gpu to use.") flags.DEFINE_integer('print_every', 50, "How often to print training info.") flags.DEFINE_integer('max_total_steps', 10**10, "Maximum total number of iterations") os.environ["CUDA_VISIBLE_DEVICES"]=str(FLAGS.gpu) GPU_MEM_FRACTION = 0.8 def log_dir(): log_dir = FLAGS.base_log_dir + "/unsup-" + FLAGS.train_prefix.split("/")[-2] log_dir += "/{model:s}_{model_size:s}_{lr:0.6f}/".format( model=FLAGS.model, model_size=FLAGS.model_size, lr=FLAGS.learning_rate) if not os.path.exists(log_dir): os.makedirs(log_dir) return log_dir # Define model evaluation function def evaluate(sess, model, minibatch_iter, size=None): t_test = time.time() feed_dict_val = minibatch_iter.val_feed_dict(size) outs_val = sess.run([model.loss, model.ranks, model.mrr], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], outs_val[2], (time.time() - t_test) def incremental_evaluate(sess, model, minibatch_iter, size): t_test = time.time() finished = False val_losses = [] val_mrrs = [] iter_num = 0 while not finished: feed_dict_val, finished, _ = minibatch_iter.incremental_val_feed_dict(size, iter_num) iter_num += 1 outs_val = sess.run([model.loss, model.ranks, model.mrr], feed_dict=feed_dict_val) val_losses.append(outs_val[0]) val_mrrs.append(outs_val[2]) return np.mean(val_losses), np.mean(val_mrrs), (time.time() - t_test) def save_val_embeddings(sess, model, minibatch_iter, size, out_dir, mod=""): val_embeddings = [] finished = False seen = set([]) nodes = [] 
iter_num = 0 name = "val" while not finished: feed_dict_val, finished, edges = minibatch_iter.incremental_embed_feed_dict(size, iter_num) iter_num += 1 outs_val = sess.run([model.loss, model.mrr, model.outputs1], feed_dict=feed_dict_val) #ONLY SAVE FOR embeds1 because of planetoid for i, edge in enumerate(edges): if not edge[0] in seen: val_embeddings.append(outs_val[-1][i,:]) nodes.append(edge[0]) seen.add(edge[0]) if not os.path.exists(out_dir): os.makedirs(out_dir) val_embeddings = np.vstack(val_embeddings) np.save(out_dir + name + mod + ".npy", val_embeddings) with open(out_dir + name + mod + ".txt", "w") as fp: fp.write("\n".join(map(str,nodes))) def construct_placeholders(): # Define placeholders placeholders = { 'batch1' : tf.placeholder(tf.int32, shape=(None), name='batch1'), 'batch2' : tf.placeholder(tf.int32, shape=(None), name='batch2'), # negative samples for all nodes in the batch 'neg_samples': tf.placeholder(tf.int32, shape=(None,), name='neg_sample_size'), 'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'), 'batch_size' : tf.placeholder(tf.int32, name='batch_size'), } return placeholders def train(train_data, test_data=None): G = train_data[0] features = train_data[1] id_map = train_data[2] print("G: " + str(G)) print("features: " + str(features)) print("id_map: " + str(id_map)) if not features is None: # pad with dummy zero vector features = np.vstack([features, np.zeros((features.shape[1],))]) context_pairs = train_data[3] if FLAGS.random_context else None placeholders = construct_placeholders() #returns a dictionary of placeholder minibatch = EdgeMinibatchIterator(G, #produce a bunch of minibatch id_map, placeholders, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, num_neg_samples=FLAGS.neg_sample_size, context_pairs = context_pairs) #a useful object adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape) adj_info = tf.Variable(adj_info_ph, trainable=False, name="adj_info") # with tf.Session() as sess: # 
sess.run(tf.global_variables_initializer()) # sess.run(tf.local_variables_initializer()) # print("adj_info: " + str(sess.run(adj_info))) if FLAGS.model == 'graphsage_mean': # Create model sampler = UniformNeighborSampler(adj_info)#to wrap the lookup function layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, model_size=FLAGS.model_size, identity_dim = FLAGS.identity_dim, logging=True) #set training parameters and define loss function etc elif FLAGS.model == 'gcn': # Create model sampler = UniformNeighborSampler(adj_info) layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, 2*FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, 2*FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type="gcn", model_size=FLAGS.model_size, identity_dim = FLAGS.identity_dim, concat=False, logging=True) elif FLAGS.model == 'graphsage_seq': sampler = UniformNeighborSampler(adj_info) layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, identity_dim = FLAGS.identity_dim, aggregator_type="seq", model_size=FLAGS.model_size, logging=True) elif FLAGS.model == 'graphsage_maxpool': sampler = UniformNeighborSampler(adj_info) layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type="maxpool", model_size=FLAGS.model_size, identity_dim = FLAGS.identity_dim, logging=True) elif FLAGS.model == 'graphsage_meanpool': sampler = UniformNeighborSampler(adj_info) layer_infos = 
[SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)] model = SampleAndAggregate(placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type="meanpool", model_size=FLAGS.model_size, identity_dim = FLAGS.identity_dim, logging=True) elif FLAGS.model == 'n2v': model = Node2VecModel(placeholders, features.shape[0], minibatch.deg, #2x because graphsage uses concat nodevec_dim=2*FLAGS.dim_1, lr=FLAGS.learning_rate) else: raise Exception('Error: model name unrecognized.') config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement) config.gpu_options.allow_growth = True #config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION config.allow_soft_placement = True # Initialize session sess = tf.Session(config=config) merged = tf.summary.merge_all() summary_writer = tf.summary.FileWriter(log_dir(), sess.graph) # Init variables sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj}) # Train model train_shadow_mrr = None shadow_mrr = None total_steps = 0 avg_time = 0.0 epoch_val_costs = [] train_adj_info = tf.assign(adj_info, minibatch.adj) val_adj_info = tf.assign(adj_info, minibatch.test_adj) for epoch in range(FLAGS.epochs): minibatch.shuffle() iter = 0 print('Epoch: %04d' % (epoch + 1)) epoch_val_costs.append(0) while not minibatch.end(): # Construct feed dictionary feed_dict = minibatch.next_minibatch_feed_dict() feed_dict.update({placeholders['dropout']: FLAGS.dropout}) t = time.time() # Training step outs = sess.run([merged, model.opt_op, model.loss, model.ranks, model.aff_all, model.mrr, model.outputs1], feed_dict=feed_dict) train_cost = outs[2] train_mrr = outs[5] if train_shadow_mrr is None: train_shadow_mrr = train_mrr# else: train_shadow_mrr -= (1-0.99) * (train_shadow_mrr - train_mrr) if iter % FLAGS.validate_iter == 0: # Validation sess.run(val_adj_info.op) val_cost, ranks, val_mrr, duration = evaluate(sess, model, 
minibatch, size=FLAGS.validate_batch_size) sess.run(train_adj_info.op) epoch_val_costs[-1] += val_cost if shadow_mrr is None: shadow_mrr = val_mrr else: shadow_mrr -= (1-0.99) * (shadow_mrr - val_mrr) if total_steps % FLAGS.print_every == 0: summary_writer.add_summary(outs[0], total_steps) # Print results avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1) if total_steps % FLAGS.print_every == 0: print("Iter:", '%04d' % iter, "train_loss=", "{:.5f}".format(train_cost), "train_mrr=", "{:.5f}".format(train_mrr),#Mean reciprocal rank "train_mrr_ema=", "{:.5f}".format(train_shadow_mrr), # exponential moving average "val_loss=", "{:.5f}".format(val_cost), "val_mrr=", "{:.5f}".format(val_mrr), "val_mrr_ema=", "{:.5f}".format(shadow_mrr), # exponential moving average "time=", "{:.5f}".format(avg_time)) iter += 1 total_steps += 1 if total_steps > FLAGS.max_total_steps: break if total_steps > FLAGS.max_total_steps: break print("Optimization Finished!") if FLAGS.save_embeddings: sess.run(val_adj_info.op) save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir()) if FLAGS.model == "n2v": # stopping the gradient for the already trained nodes train_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if not G.node[n]['val'] and not G.node[n]['test']], dtype=tf.int32) test_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if G.node[n]['val'] or G.node[n]['test']], dtype=tf.int32) update_nodes = tf.nn.embedding_lookup(model.context_embeds, tf.squeeze(test_ids)) no_update_nodes = tf.nn.embedding_lookup(model.context_embeds,tf.squeeze(train_ids)) update_nodes = tf.scatter_nd(test_ids, update_nodes, tf.shape(model.context_embeds)) no_update_nodes = tf.stop_gradient(tf.scatter_nd(train_ids, no_update_nodes, tf.shape(model.context_embeds))) model.context_embeds = update_nodes + no_update_nodes sess.run(model.context_embeds) # run random walks from graphsage.utils import run_random_walks nodes = [n for n in G.nodes_iter() if 
G.node[n]["val"] or G.node[n]["test"]] start_time = time.time() pairs = run_random_walks(G, nodes, num_walks=50) walk_time = time.time() - start_time test_minibatch = EdgeMinibatchIterator(G, id_map, placeholders, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, num_neg_samples=FLAGS.neg_sample_size, context_pairs = pairs, n2v_retrain=True, fixed_n2v=True) start_time = time.time() print("Doing test training for n2v.") test_steps = 0 for epoch in range(FLAGS.n2v_test_epochs): test_minibatch.shuffle() while not test_minibatch.end(): feed_dict = test_minibatch.next_minibatch_feed_dict() feed_dict.update({placeholders['dropout']: FLAGS.dropout}) outs = sess.run([model.opt_op, model.loss, model.ranks, model.aff_all, model.mrr, model.outputs1], feed_dict=feed_dict) if test_steps % FLAGS.print_every == 0: print("Iter:", '%04d' % test_steps, "train_loss=", "{:.5f}".format(outs[1]), "train_mrr=", "{:.5f}".format(outs[-2])) test_steps += 1 train_time = time.time() - start_time save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir(), mod="-test") print("Total time: ", train_time+walk_time) print("Walk time: ", walk_time) print("Train time: ", train_time) def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, load_walks=True) # for processing Redit's data (Reddit's data is a bit wired) print("Done loading training data..") train(train_data) if __name__ == '__main__': tf.app.run()
true
true
1c45f085e004e34a83549f22c405ac311d6001c4
48,492
bzl
Python
examples/crate_universe/vendor_remote_pkgs/crates/defs.bzl
cfredric/rules_rust
521e649ff44e9711fe3c45b0ec1e792f7e1d361e
[ "Apache-2.0" ]
null
null
null
examples/crate_universe/vendor_remote_pkgs/crates/defs.bzl
cfredric/rules_rust
521e649ff44e9711fe3c45b0ec1e792f7e1d361e
[ "Apache-2.0" ]
null
null
null
examples/crate_universe/vendor_remote_pkgs/crates/defs.bzl
cfredric/rules_rust
521e649ff44e9711fe3c45b0ec1e792f7e1d361e
[ "Apache-2.0" ]
null
null
null
############################################################################### # @generated # This file is auto-generated by the cargo-bazel tool. # # DO NOT MODIFY: Local changes may be replaced in future executions. ############################################################################### """ # `crates_repository` API - [aliases](#aliases) - [crate_deps](#crate_deps) - [all_crate_deps](#all_crate_deps) - [crate_repositories](#crate_repositories) """ load("@bazel_skylib//lib:selects.bzl", "selects") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") ############################################################################### # MACROS API ############################################################################### # An identifier that represent common dependencies (unconditional). _COMMON_CONDITION = "" def _flatten_dependency_maps(all_dependency_maps): """Flatten a list of dependency maps into one dictionary. Dependency maps have the following structure: ```python DEPENDENCIES_MAP = { # The first key in the map is a Bazel package # name of the workspace this file is defined in. "workspace_member_package": { # Not all dependnecies are supported for all platforms. # the condition key is the condition required to be true # on the host platform. "condition": { # An alias to a crate target. # The label of the crate target the # Aliases are only crate names. # package name refers to. 
"package_name": "@full//:label", } } } ``` Args: all_dependency_maps (list): A list of dicts as described above Returns: dict: A dictionary as described above """ dependencies = {} for workspace_deps_map in all_dependency_maps: for pkg_name, conditional_deps_map in workspace_deps_map.items(): if pkg_name not in dependencies: non_frozen_map = dict() for key, values in conditional_deps_map.items(): non_frozen_map.update({key: dict(values.items())}) dependencies.setdefault(pkg_name, non_frozen_map) continue for condition, deps_map in conditional_deps_map.items(): # If the condition has not been recorded, do so and continue if condition not in dependencies[pkg_name]: dependencies[pkg_name].setdefault(condition, dict(deps_map.items())) continue # Alert on any miss-matched dependencies inconsistent_entries = [] for crate_name, crate_label in deps_map.items(): existing = dependencies[pkg_name][condition].get(crate_name) if existing and existing != crate_label: inconsistent_entries.append((crate_name, existing, crate_label)) dependencies[pkg_name][condition].update({crate_name: crate_label}) return dependencies def crate_deps(deps, package_name = None): """Finds the fully qualified label of the requested crates for the package where this macro is called. Args: deps (list): The desired list of crate targets. package_name (str, optional): The package name of the set of dependencies to look up. Defaults to `native.package_name()`. 
Returns: list: A list of labels to generated rust targets (str) """ if not deps: return [] if package_name == None: package_name = native.package_name() # Join both sets of dependencies dependencies = _flatten_dependency_maps([ _NORMAL_DEPENDENCIES, _NORMAL_DEV_DEPENDENCIES, _PROC_MACRO_DEPENDENCIES, _PROC_MACRO_DEV_DEPENDENCIES, _BUILD_DEPENDENCIES, _BUILD_PROC_MACRO_DEPENDENCIES, ]).pop(package_name, {}) # Combine all conditional packages so we can easily index over a flat list # TODO: Perhaps this should actually return select statements and maintain # the conditionals of the dependencies flat_deps = {} for deps_set in dependencies.values(): for crate_name, crate_label in deps_set.items(): flat_deps.update({crate_name: crate_label}) missing_crates = [] crate_targets = [] for crate_target in deps: if crate_target not in flat_deps: missing_crates.append(crate_target) else: crate_targets.append(flat_deps[crate_target]) if missing_crates: fail("Could not find crates `{}` among dependencies of `{}`. Available dependencies were `{}`".format( missing_crates, package_name, dependencies, )) return crate_targets def all_crate_deps( normal = False, normal_dev = False, proc_macro = False, proc_macro_dev = False, build = False, build_proc_macro = False, package_name = None): """Finds the fully qualified label of all requested direct crate dependencies \ for the package where this macro is called. If no parameters are set, all normal dependencies are returned. Setting any one flag will otherwise impact the contents of the returned list. Args: normal (bool, optional): If True, normal dependencies are included in the output list. normal_dev (bool, optional): If True, normla dev dependencies will be included in the output list.. proc_macro (bool, optional): If True, proc_macro dependencies are included in the output list. proc_macro_dev (bool, optional): If True, dev proc_macro dependencies are included in the output list. 
build (bool, optional): If True, build dependencies are included in the output list. build_proc_macro (bool, optional): If True, build proc_macro dependencies are included in the output list. package_name (str, optional): The package name of the set of dependencies to look up. Defaults to `native.package_name()` when unset. Returns: list: A list of labels to generated rust targets (str) """ if package_name == None: package_name = native.package_name() # Determine the relevant maps to use all_dependency_maps = [] if normal: all_dependency_maps.append(_NORMAL_DEPENDENCIES) if normal_dev: all_dependency_maps.append(_NORMAL_DEV_DEPENDENCIES) if proc_macro: all_dependency_maps.append(_PROC_MACRO_DEPENDENCIES) if proc_macro_dev: all_dependency_maps.append(_PROC_MACRO_DEV_DEPENDENCIES) if build: all_dependency_maps.append(_BUILD_DEPENDENCIES) if build_proc_macro: all_dependency_maps.append(_BUILD_PROC_MACRO_DEPENDENCIES) # Default to always using normal dependencies if not all_dependency_maps: all_dependency_maps.append(_NORMAL_DEPENDENCIES) dependencies = _flatten_dependency_maps(all_dependency_maps).pop(package_name, None) if not dependencies: if dependencies == None: fail("Tried to get all_crate_deps for package " + package_name + " but that package had no Cargo.toml file") else: return [] crate_deps = list(dependencies.pop(_COMMON_CONDITION, {}).values()) for condition, deps in dependencies.items(): crate_deps += selects.with_or({_CONDITIONS[condition]: deps.values()}) return crate_deps def aliases( normal = False, normal_dev = False, proc_macro = False, proc_macro_dev = False, build = False, build_proc_macro = False, package_name = None): """Produces a map of Crate alias names to their original label If no dependency kinds are specified, `normal` and `proc_macro` are used by default. Setting any one flag will otherwise determine the contents of the returned dict. Args: normal (bool, optional): If True, normal dependencies are included in the output list. 
normal_dev (bool, optional): If True, normla dev dependencies will be included in the output list.. proc_macro (bool, optional): If True, proc_macro dependencies are included in the output list. proc_macro_dev (bool, optional): If True, dev proc_macro dependencies are included in the output list. build (bool, optional): If True, build dependencies are included in the output list. build_proc_macro (bool, optional): If True, build proc_macro dependencies are included in the output list. package_name (str, optional): The package name of the set of dependencies to look up. Defaults to `native.package_name()` when unset. Returns: dict: The aliases of all associated packages """ if package_name == None: package_name = native.package_name() # Determine the relevant maps to use all_aliases_maps = [] if normal: all_aliases_maps.append(_NORMAL_ALIASES) if normal_dev: all_aliases_maps.append(_NORMAL_DEV_ALIASES) if proc_macro: all_aliases_maps.append(_PROC_MACRO_ALIASES) if proc_macro_dev: all_aliases_maps.append(_PROC_MACRO_DEV_ALIASES) if build: all_aliases_maps.append(_BUILD_ALIASES) if build_proc_macro: all_aliases_maps.append(_BUILD_PROC_MACRO_ALIASES) # Default to always using normal aliases if not all_aliases_maps: all_aliases_maps.append(_NORMAL_ALIASES) all_aliases_maps.append(_PROC_MACRO_ALIASES) aliases = _flatten_dependency_maps(all_aliases_maps).pop(package_name, None) if not aliases: return dict() common_items = aliases.pop(_COMMON_CONDITION, {}).items() # If there are only common items in the dictionary, immediately return them if not len(aliases.keys()) == 1: return dict(common_items) # Build a single select statement where each conditional has accounted for the # common set of aliases. 
crate_aliases = {"//conditions:default": common_items} for condition, deps in aliases.items(): condition_triples = _CONDITIONS[condition] if condition_triples in crate_aliases: crate_aliases[condition_triples].update(deps) else: crate_aliases.update({_CONDITIONS[condition]: dict(deps.items() + common_items)}) return selects.with_or(crate_aliases) ############################################################################### # WORKSPACE MEMBER DEPS AND ALIASES ############################################################################### _NORMAL_DEPENDENCIES = { "": { _COMMON_CONDITION: { "axum": "@crates_vendor_pkgs__axum-0.4.8//:axum", "hyper": "@crates_vendor_pkgs__hyper-0.14.18//:hyper", "mime": "@crates_vendor_pkgs__mime-0.3.16//:mime", "serde_json": "@crates_vendor_pkgs__serde_json-1.0.81//:serde_json", "tokio": "@crates_vendor_pkgs__tokio-1.16.1//:tokio", "tower": "@crates_vendor_pkgs__tower-0.4.12//:tower", "tower-http": "@crates_vendor_pkgs__tower-http-0.2.5//:tower_http", "tracing": "@crates_vendor_pkgs__tracing-0.1.34//:tracing", "tracing-subscriber": "@crates_vendor_pkgs__tracing-subscriber-0.3.11//:tracing_subscriber", }, }, } _NORMAL_ALIASES = { "": { _COMMON_CONDITION: { }, }, } _NORMAL_DEV_DEPENDENCIES = { "": { }, } _NORMAL_DEV_ALIASES = { "": { }, } _PROC_MACRO_DEPENDENCIES = { "": { }, } _PROC_MACRO_ALIASES = { "": { }, } _PROC_MACRO_DEV_DEPENDENCIES = { "": { }, } _PROC_MACRO_DEV_ALIASES = { "": { }, } _BUILD_DEPENDENCIES = { "": { }, } _BUILD_ALIASES = { "": { }, } _BUILD_PROC_MACRO_DEPENDENCIES = { "": { }, } _BUILD_PROC_MACRO_ALIASES = { "": { }, } _CONDITIONS = { "cfg(all(any(target_arch = \"x86_64\", target_arch = \"aarch64\"), target_os = \"hermit\"))": [], "cfg(not(windows))": ["aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", "aarch64-linux-android", "aarch64-unknown-linux-gnu", "arm-unknown-linux-gnueabi", "armv7-unknown-linux-gnueabi", "i686-apple-darwin", "i686-linux-android", "i686-unknown-freebsd", 
"i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "riscv32imc-unknown-none-elf", "s390x-unknown-linux-gnu", "wasm32-unknown-unknown", "wasm32-wasi", "x86_64-apple-darwin", "x86_64-apple-ios", "x86_64-linux-android", "x86_64-unknown-freebsd", "x86_64-unknown-linux-gnu"], "cfg(target_os = \"redox\")": [], "cfg(target_os = \"windows\")": ["i686-pc-windows-msvc", "x86_64-pc-windows-msvc"], "cfg(tracing_unstable)": [], "cfg(unix)": ["aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", "aarch64-linux-android", "aarch64-unknown-linux-gnu", "arm-unknown-linux-gnueabi", "armv7-unknown-linux-gnueabi", "i686-apple-darwin", "i686-linux-android", "i686-unknown-freebsd", "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", "x86_64-linux-android", "x86_64-unknown-freebsd", "x86_64-unknown-linux-gnu"], "cfg(windows)": ["i686-pc-windows-msvc", "x86_64-pc-windows-msvc"], "i686-pc-windows-gnu": [], "x86_64-pc-windows-gnu": [], } ############################################################################### def crate_repositories(): """A macro for defining repositories for all generated crates""" maybe( http_archive, name = "crates_vendor_pkgs__ansi_term-0.12.1", sha256 = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/ansi_term/0.12.1/download"], strip_prefix = "ansi_term-0.12.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ansi_term-0.12.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__async-trait-0.1.53", sha256 = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/async-trait/0.1.53/download"], strip_prefix = "async-trait-0.1.53", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.async-trait-0.1.53.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__autocfg-1.1.0", sha256 = 
"d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/autocfg/1.1.0/download"], strip_prefix = "autocfg-1.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.autocfg-1.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__axum-0.4.8", sha256 = "c9f346c92c1e9a71d14fe4aaf7c2a5d9932cc4e5e48d8fb6641524416eb79ddd", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/axum/0.4.8/download"], strip_prefix = "axum-0.4.8", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.axum-0.4.8.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__axum-core-0.1.2", sha256 = "6dbcda393bef9c87572779cb8ef916f12d77750b27535dd6819fa86591627a51", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/axum-core/0.1.2/download"], strip_prefix = "axum-core-0.1.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.axum-core-0.1.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__bitflags-1.3.2", sha256 = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/bitflags/1.3.2/download"], strip_prefix = "bitflags-1.3.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.bitflags-1.3.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__bytes-1.1.0", sha256 = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/bytes/1.1.0/download"], strip_prefix = "bytes-1.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.bytes-1.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__cfg-if-1.0.0", sha256 = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/cfg-if/1.0.0/download"], strip_prefix = "cfg-if-1.0.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.cfg-if-1.0.0.bazel"), ) maybe( 
http_archive, name = "crates_vendor_pkgs__fnv-1.0.7", sha256 = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/fnv/1.0.7/download"], strip_prefix = "fnv-1.0.7", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.fnv-1.0.7.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__form_urlencoded-1.0.1", sha256 = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/form_urlencoded/1.0.1/download"], strip_prefix = "form_urlencoded-1.0.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.form_urlencoded-1.0.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__futures-channel-0.3.21", sha256 = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/futures-channel/0.3.21/download"], strip_prefix = "futures-channel-0.3.21", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-channel-0.3.21.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__futures-core-0.3.21", sha256 = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/futures-core/0.3.21/download"], strip_prefix = "futures-core-0.3.21", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-core-0.3.21.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__futures-sink-0.3.21", sha256 = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/futures-sink/0.3.21/download"], strip_prefix = "futures-sink-0.3.21", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-sink-0.3.21.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__futures-task-0.3.21", sha256 = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a", type = "tar.gz", urls = 
["https://crates.io/api/v1/crates/futures-task/0.3.21/download"], strip_prefix = "futures-task-0.3.21", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-task-0.3.21.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__futures-util-0.3.21", sha256 = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/futures-util/0.3.21/download"], strip_prefix = "futures-util-0.3.21", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-util-0.3.21.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__h2-0.3.13", sha256 = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/h2/0.3.13/download"], strip_prefix = "h2-0.3.13", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.h2-0.3.13.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__hashbrown-0.11.2", sha256 = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/hashbrown/0.11.2/download"], strip_prefix = "hashbrown-0.11.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.hashbrown-0.11.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__hermit-abi-0.1.19", sha256 = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/hermit-abi/0.1.19/download"], strip_prefix = "hermit-abi-0.1.19", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.hermit-abi-0.1.19.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__http-0.2.7", sha256 = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/http/0.2.7/download"], strip_prefix = "http-0.2.7", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.http-0.2.7.bazel"), ) maybe( http_archive, name = 
"crates_vendor_pkgs__http-body-0.4.5", sha256 = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/http-body/0.4.5/download"], strip_prefix = "http-body-0.4.5", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.http-body-0.4.5.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__http-range-header-0.3.0", sha256 = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/http-range-header/0.3.0/download"], strip_prefix = "http-range-header-0.3.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.http-range-header-0.3.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__httparse-1.7.1", sha256 = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/httparse/1.7.1/download"], strip_prefix = "httparse-1.7.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.httparse-1.7.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__httpdate-1.0.2", sha256 = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/httpdate/1.0.2/download"], strip_prefix = "httpdate-1.0.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.httpdate-1.0.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__hyper-0.14.18", sha256 = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/hyper/0.14.18/download"], strip_prefix = "hyper-0.14.18", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.hyper-0.14.18.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__indexmap-1.8.1", sha256 = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/indexmap/1.8.1/download"], strip_prefix 
= "indexmap-1.8.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.indexmap-1.8.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__instant-0.1.12", sha256 = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/instant/0.1.12/download"], strip_prefix = "instant-0.1.12", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.instant-0.1.12.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__itoa-1.0.2", sha256 = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/itoa/1.0.2/download"], strip_prefix = "itoa-1.0.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.itoa-1.0.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__lazy_static-1.4.0", sha256 = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/lazy_static/1.4.0/download"], strip_prefix = "lazy_static-1.4.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.lazy_static-1.4.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__libc-0.2.126", sha256 = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/libc/0.2.126/download"], strip_prefix = "libc-0.2.126", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.libc-0.2.126.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__lock_api-0.4.7", sha256 = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/lock_api/0.4.7/download"], strip_prefix = "lock_api-0.4.7", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.lock_api-0.4.7.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__log-0.4.17", sha256 = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e", type = 
"tar.gz", urls = ["https://crates.io/api/v1/crates/log/0.4.17/download"], strip_prefix = "log-0.4.17", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.log-0.4.17.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__matches-0.1.9", sha256 = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/matches/0.1.9/download"], strip_prefix = "matches-0.1.9", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.matches-0.1.9.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__matchit-0.4.6", sha256 = "9376a4f0340565ad675d11fc1419227faf5f60cd7ac9cb2e7185a471f30af833", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/matchit/0.4.6/download"], strip_prefix = "matchit-0.4.6", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.matchit-0.4.6.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__memchr-2.5.0", sha256 = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/memchr/2.5.0/download"], strip_prefix = "memchr-2.5.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.memchr-2.5.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__mime-0.3.16", sha256 = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/mime/0.3.16/download"], strip_prefix = "mime-0.3.16", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.mime-0.3.16.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__mio-0.7.14", sha256 = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/mio/0.7.14/download"], strip_prefix = "mio-0.7.14", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.mio-0.7.14.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__miow-0.3.7", sha256 = 
"b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/miow/0.3.7/download"], strip_prefix = "miow-0.3.7", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.miow-0.3.7.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__ntapi-0.3.7", sha256 = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/ntapi/0.3.7/download"], strip_prefix = "ntapi-0.3.7", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ntapi-0.3.7.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__num_cpus-1.13.1", sha256 = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/num_cpus/1.13.1/download"], strip_prefix = "num_cpus-1.13.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.num_cpus-1.13.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__once_cell-1.12.0", sha256 = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/once_cell/1.12.0/download"], strip_prefix = "once_cell-1.12.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.once_cell-1.12.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__parking_lot-0.11.2", sha256 = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/parking_lot/0.11.2/download"], strip_prefix = "parking_lot-0.11.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.parking_lot-0.11.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__parking_lot_core-0.8.5", sha256 = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/parking_lot_core/0.8.5/download"], strip_prefix = "parking_lot_core-0.8.5", build_file = 
Label("@examples//vendor_remote_pkgs/crates:BUILD.parking_lot_core-0.8.5.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__percent-encoding-2.1.0", sha256 = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/percent-encoding/2.1.0/download"], strip_prefix = "percent-encoding-2.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.percent-encoding-2.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__pin-project-1.0.10", sha256 = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/pin-project/1.0.10/download"], strip_prefix = "pin-project-1.0.10", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-1.0.10.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__pin-project-internal-1.0.10", sha256 = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/pin-project-internal/1.0.10/download"], strip_prefix = "pin-project-internal-1.0.10", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-internal-1.0.10.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__pin-project-lite-0.2.9", sha256 = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/pin-project-lite/0.2.9/download"], strip_prefix = "pin-project-lite-0.2.9", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-lite-0.2.9.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__pin-utils-0.1.0", sha256 = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/pin-utils/0.1.0/download"], strip_prefix = "pin-utils-0.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-utils-0.1.0.bazel"), ) maybe( http_archive, name = 
"crates_vendor_pkgs__proc-macro2-1.0.39", sha256 = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/proc-macro2/1.0.39/download"], strip_prefix = "proc-macro2-1.0.39", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.proc-macro2-1.0.39.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__quote-1.0.18", sha256 = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/quote/1.0.18/download"], strip_prefix = "quote-1.0.18", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.quote-1.0.18.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__redox_syscall-0.2.13", sha256 = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/redox_syscall/0.2.13/download"], strip_prefix = "redox_syscall-0.2.13", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.redox_syscall-0.2.13.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__ryu-1.0.10", sha256 = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/ryu/1.0.10/download"], strip_prefix = "ryu-1.0.10", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ryu-1.0.10.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__scopeguard-1.1.0", sha256 = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/scopeguard/1.1.0/download"], strip_prefix = "scopeguard-1.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.scopeguard-1.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__serde-1.0.137", sha256 = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/serde/1.0.137/download"], strip_prefix = 
"serde-1.0.137", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde-1.0.137.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__serde_json-1.0.81", sha256 = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/serde_json/1.0.81/download"], strip_prefix = "serde_json-1.0.81", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde_json-1.0.81.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__serde_urlencoded-0.7.1", sha256 = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/serde_urlencoded/0.7.1/download"], strip_prefix = "serde_urlencoded-0.7.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde_urlencoded-0.7.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__sharded-slab-0.1.4", sha256 = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/sharded-slab/0.1.4/download"], strip_prefix = "sharded-slab-0.1.4", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.sharded-slab-0.1.4.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__signal-hook-registry-1.4.0", sha256 = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/signal-hook-registry/1.4.0/download"], strip_prefix = "signal-hook-registry-1.4.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.signal-hook-registry-1.4.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__slab-0.4.6", sha256 = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/slab/0.4.6/download"], strip_prefix = "slab-0.4.6", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.slab-0.4.6.bazel"), ) maybe( http_archive, name = 
"crates_vendor_pkgs__smallvec-1.8.0", sha256 = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/smallvec/1.8.0/download"], strip_prefix = "smallvec-1.8.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.smallvec-1.8.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__socket2-0.4.4", sha256 = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/socket2/0.4.4/download"], strip_prefix = "socket2-0.4.4", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.socket2-0.4.4.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__syn-1.0.95", sha256 = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/syn/1.0.95/download"], strip_prefix = "syn-1.0.95", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.syn-1.0.95.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__sync_wrapper-0.1.1", sha256 = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/sync_wrapper/0.1.1/download"], strip_prefix = "sync_wrapper-0.1.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.sync_wrapper-0.1.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__thread_local-1.1.4", sha256 = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/thread_local/1.1.4/download"], strip_prefix = "thread_local-1.1.4", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.thread_local-1.1.4.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tokio-1.16.1", sha256 = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tokio/1.16.1/download"], strip_prefix = "tokio-1.16.1", build_file 
= Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-1.16.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tokio-macros-1.7.0", sha256 = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tokio-macros/1.7.0/download"], strip_prefix = "tokio-macros-1.7.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-macros-1.7.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tokio-util-0.7.2", sha256 = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tokio-util/0.7.2/download"], strip_prefix = "tokio-util-0.7.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-util-0.7.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tower-0.4.12", sha256 = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tower/0.4.12/download"], strip_prefix = "tower-0.4.12", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-0.4.12.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tower-http-0.2.5", sha256 = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tower-http/0.2.5/download"], strip_prefix = "tower-http-0.2.5", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-http-0.2.5.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tower-layer-0.3.1", sha256 = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tower-layer/0.3.1/download"], strip_prefix = "tower-layer-0.3.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-layer-0.3.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tower-service-0.3.1", sha256 = 
"360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tower-service/0.3.1/download"], strip_prefix = "tower-service-0.3.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-service-0.3.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-0.1.34", sha256 = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing/0.1.34/download"], strip_prefix = "tracing-0.1.34", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-0.1.34.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-attributes-0.1.21", sha256 = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing-attributes/0.1.21/download"], strip_prefix = "tracing-attributes-0.1.21", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-attributes-0.1.21.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-core-0.1.26", sha256 = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing-core/0.1.26/download"], strip_prefix = "tracing-core-0.1.26", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-core-0.1.26.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-log-0.1.3", sha256 = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing-log/0.1.3/download"], strip_prefix = "tracing-log-0.1.3", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-log-0.1.3.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-subscriber-0.3.11", sha256 = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596", type = "tar.gz", urls = 
["https://crates.io/api/v1/crates/tracing-subscriber/0.3.11/download"], strip_prefix = "tracing-subscriber-0.3.11", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-subscriber-0.3.11.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__try-lock-0.2.3", sha256 = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/try-lock/0.2.3/download"], strip_prefix = "try-lock-0.2.3", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.try-lock-0.2.3.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__unicode-ident-1.0.0", sha256 = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/unicode-ident/1.0.0/download"], strip_prefix = "unicode-ident-1.0.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.unicode-ident-1.0.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__valuable-0.1.0", sha256 = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/valuable/0.1.0/download"], strip_prefix = "valuable-0.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.valuable-0.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__want-0.3.0", sha256 = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/want/0.3.0/download"], strip_prefix = "want-0.3.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.want-0.3.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__winapi-0.3.9", sha256 = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/winapi/0.3.9/download"], strip_prefix = "winapi-0.3.9", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-0.3.9.bazel"), ) maybe( http_archive, name = 
"crates_vendor_pkgs__winapi-i686-pc-windows-gnu-0.4.0", sha256 = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/winapi-i686-pc-windows-gnu/0.4.0/download"], strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-i686-pc-windows-gnu-0.4.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__winapi-x86_64-pc-windows-gnu-0.4.0", sha256 = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/winapi-x86_64-pc-windows-gnu/0.4.0/download"], strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-x86_64-pc-windows-gnu-0.4.0.bazel"), )
40.477462
552
0.644086
es.io/api/v1/crates/matches/0.1.9/download"], strip_prefix = "matches-0.1.9", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.matches-0.1.9.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__matchit-0.4.6", sha256 = "9376a4f0340565ad675d11fc1419227faf5f60cd7ac9cb2e7185a471f30af833", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/matchit/0.4.6/download"], strip_prefix = "matchit-0.4.6", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.matchit-0.4.6.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__memchr-2.5.0", sha256 = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/memchr/2.5.0/download"], strip_prefix = "memchr-2.5.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.memchr-2.5.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__mime-0.3.16", sha256 = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/mime/0.3.16/download"], strip_prefix = "mime-0.3.16", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.mime-0.3.16.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__mio-0.7.14", sha256 = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/mio/0.7.14/download"], strip_prefix = "mio-0.7.14", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.mio-0.7.14.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__miow-0.3.7", sha256 = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/miow/0.3.7/download"], strip_prefix = "miow-0.3.7", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.miow-0.3.7.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__ntapi-0.3.7", sha256 = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f", 
type = "tar.gz", urls = ["https://crates.io/api/v1/crates/ntapi/0.3.7/download"], strip_prefix = "ntapi-0.3.7", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ntapi-0.3.7.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__num_cpus-1.13.1", sha256 = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/num_cpus/1.13.1/download"], strip_prefix = "num_cpus-1.13.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.num_cpus-1.13.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__once_cell-1.12.0", sha256 = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/once_cell/1.12.0/download"], strip_prefix = "once_cell-1.12.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.once_cell-1.12.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__parking_lot-0.11.2", sha256 = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/parking_lot/0.11.2/download"], strip_prefix = "parking_lot-0.11.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.parking_lot-0.11.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__parking_lot_core-0.8.5", sha256 = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/parking_lot_core/0.8.5/download"], strip_prefix = "parking_lot_core-0.8.5", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.parking_lot_core-0.8.5.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__percent-encoding-2.1.0", sha256 = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/percent-encoding/2.1.0/download"], strip_prefix = "percent-encoding-2.1.0", build_file = 
Label("@examples//vendor_remote_pkgs/crates:BUILD.percent-encoding-2.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__pin-project-1.0.10", sha256 = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/pin-project/1.0.10/download"], strip_prefix = "pin-project-1.0.10", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-1.0.10.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__pin-project-internal-1.0.10", sha256 = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/pin-project-internal/1.0.10/download"], strip_prefix = "pin-project-internal-1.0.10", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-internal-1.0.10.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__pin-project-lite-0.2.9", sha256 = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/pin-project-lite/0.2.9/download"], strip_prefix = "pin-project-lite-0.2.9", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-lite-0.2.9.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__pin-utils-0.1.0", sha256 = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/pin-utils/0.1.0/download"], strip_prefix = "pin-utils-0.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-utils-0.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__proc-macro2-1.0.39", sha256 = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/proc-macro2/1.0.39/download"], strip_prefix = "proc-macro2-1.0.39", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.proc-macro2-1.0.39.bazel"), ) maybe( http_archive, name = 
"crates_vendor_pkgs__quote-1.0.18", sha256 = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/quote/1.0.18/download"], strip_prefix = "quote-1.0.18", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.quote-1.0.18.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__redox_syscall-0.2.13", sha256 = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/redox_syscall/0.2.13/download"], strip_prefix = "redox_syscall-0.2.13", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.redox_syscall-0.2.13.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__ryu-1.0.10", sha256 = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/ryu/1.0.10/download"], strip_prefix = "ryu-1.0.10", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ryu-1.0.10.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__scopeguard-1.1.0", sha256 = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/scopeguard/1.1.0/download"], strip_prefix = "scopeguard-1.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.scopeguard-1.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__serde-1.0.137", sha256 = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/serde/1.0.137/download"], strip_prefix = "serde-1.0.137", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde-1.0.137.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__serde_json-1.0.81", sha256 = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/serde_json/1.0.81/download"], strip_prefix = "serde_json-1.0.81", 
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde_json-1.0.81.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__serde_urlencoded-0.7.1", sha256 = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/serde_urlencoded/0.7.1/download"], strip_prefix = "serde_urlencoded-0.7.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde_urlencoded-0.7.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__sharded-slab-0.1.4", sha256 = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/sharded-slab/0.1.4/download"], strip_prefix = "sharded-slab-0.1.4", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.sharded-slab-0.1.4.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__signal-hook-registry-1.4.0", sha256 = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/signal-hook-registry/1.4.0/download"], strip_prefix = "signal-hook-registry-1.4.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.signal-hook-registry-1.4.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__slab-0.4.6", sha256 = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/slab/0.4.6/download"], strip_prefix = "slab-0.4.6", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.slab-0.4.6.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__smallvec-1.8.0", sha256 = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/smallvec/1.8.0/download"], strip_prefix = "smallvec-1.8.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.smallvec-1.8.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__socket2-0.4.4", sha256 = 
"66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/socket2/0.4.4/download"], strip_prefix = "socket2-0.4.4", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.socket2-0.4.4.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__syn-1.0.95", sha256 = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/syn/1.0.95/download"], strip_prefix = "syn-1.0.95", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.syn-1.0.95.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__sync_wrapper-0.1.1", sha256 = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/sync_wrapper/0.1.1/download"], strip_prefix = "sync_wrapper-0.1.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.sync_wrapper-0.1.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__thread_local-1.1.4", sha256 = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/thread_local/1.1.4/download"], strip_prefix = "thread_local-1.1.4", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.thread_local-1.1.4.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tokio-1.16.1", sha256 = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tokio/1.16.1/download"], strip_prefix = "tokio-1.16.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-1.16.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tokio-macros-1.7.0", sha256 = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tokio-macros/1.7.0/download"], strip_prefix = "tokio-macros-1.7.0", build_file = 
Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-macros-1.7.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tokio-util-0.7.2", sha256 = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tokio-util/0.7.2/download"], strip_prefix = "tokio-util-0.7.2", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-util-0.7.2.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tower-0.4.12", sha256 = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tower/0.4.12/download"], strip_prefix = "tower-0.4.12", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-0.4.12.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tower-http-0.2.5", sha256 = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tower-http/0.2.5/download"], strip_prefix = "tower-http-0.2.5", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-http-0.2.5.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tower-layer-0.3.1", sha256 = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tower-layer/0.3.1/download"], strip_prefix = "tower-layer-0.3.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-layer-0.3.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tower-service-0.3.1", sha256 = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tower-service/0.3.1/download"], strip_prefix = "tower-service-0.3.1", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-service-0.3.1.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-0.1.34", sha256 = 
"5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing/0.1.34/download"], strip_prefix = "tracing-0.1.34", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-0.1.34.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-attributes-0.1.21", sha256 = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing-attributes/0.1.21/download"], strip_prefix = "tracing-attributes-0.1.21", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-attributes-0.1.21.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-core-0.1.26", sha256 = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing-core/0.1.26/download"], strip_prefix = "tracing-core-0.1.26", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-core-0.1.26.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-log-0.1.3", sha256 = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing-log/0.1.3/download"], strip_prefix = "tracing-log-0.1.3", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-log-0.1.3.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__tracing-subscriber-0.3.11", sha256 = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/tracing-subscriber/0.3.11/download"], strip_prefix = "tracing-subscriber-0.3.11", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-subscriber-0.3.11.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__try-lock-0.2.3", sha256 = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642", type = "tar.gz", urls = 
["https://crates.io/api/v1/crates/try-lock/0.2.3/download"], strip_prefix = "try-lock-0.2.3", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.try-lock-0.2.3.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__unicode-ident-1.0.0", sha256 = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/unicode-ident/1.0.0/download"], strip_prefix = "unicode-ident-1.0.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.unicode-ident-1.0.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__valuable-0.1.0", sha256 = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/valuable/0.1.0/download"], strip_prefix = "valuable-0.1.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.valuable-0.1.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__want-0.3.0", sha256 = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/want/0.3.0/download"], strip_prefix = "want-0.3.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.want-0.3.0.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__winapi-0.3.9", sha256 = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/winapi/0.3.9/download"], strip_prefix = "winapi-0.3.9", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-0.3.9.bazel"), ) maybe( http_archive, name = "crates_vendor_pkgs__winapi-i686-pc-windows-gnu-0.4.0", sha256 = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/winapi-i686-pc-windows-gnu/0.4.0/download"], strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-i686-pc-windows-gnu-0.4.0.bazel"), 
) maybe( http_archive, name = "crates_vendor_pkgs__winapi-x86_64-pc-windows-gnu-0.4.0", sha256 = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f", type = "tar.gz", urls = ["https://crates.io/api/v1/crates/winapi-x86_64-pc-windows-gnu/0.4.0/download"], strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0", build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-x86_64-pc-windows-gnu-0.4.0.bazel"), )
true
true
1c45f11c3e11537796edf6b58ae0fb0bad91f88e
10,785
py
Python
code/scene.py
NoOneZero/coppelia
14f589b361025506bf1dc2733edc5cf3ce27f45a
[ "Apache-2.0" ]
1
2021-01-09T20:14:11.000Z
2021-01-09T20:14:11.000Z
code/scene.py
NoOneZero/coppelia
14f589b361025506bf1dc2733edc5cf3ce27f45a
[ "Apache-2.0" ]
null
null
null
code/scene.py
NoOneZero/coppelia
14f589b361025506bf1dc2733edc5cf3ce27f45a
[ "Apache-2.0" ]
null
null
null
from code.character import Character from code.spider import Spider from code.neuro import Neuro from code.excel import ExcelManager from code.csv_manager import CscManager import b0RemoteApi import code.config as config import time from random import choices import random class Scene: def __init__(self): self.__set_connetcion_variables_to_lib_files() self.__create_spiders() self.__create_or_connect_to_file() self.__create_neuro() self.__set_parameters_of_neuro() self.__set_parameters_for_loop_work() def __set_connetcion_variables_to_lib_files(self): self.python_client = 'b0RemoteApi_pythonClient' self.remote_api = 'b0RemoteApi_first' self.client = None def __create_spiders(self): # self.characters = [] # for i in range(config.NUMBER_OF_SPIDERS): # self.characters.append(Character()) self.spiders = [Spider()] for i in range(1, config.NUMBER_OF_SPIDERS): self.spiders.append(Spider("#{}".format(i - 1))) def __create_or_connect_to_file(self): # self.excel = ExcelManager(name=config.FILE_NAME, size=len(self.spiders)) self.csv_manager = CscManager(name=config.FILE_NAME) def __create_neuro(self): self.neuro = [] self.neuro_father = Neuro() self.neuro_mother = Neuro() self.neuro.append(Neuro()) self.fitnes = [0] * len(self.spiders) self.fitnes_radical = [0] * len(self.spiders) for i in range(1, len(self.spiders)): self.neuro.append(Neuro(mutant_power = 1)) # high, weigh = self.excel.read(0) # if (high != None): # # count = 0 # for i in range(len(self.neuro[0].axon_weigh)): # to_add = count # for j in range(len(self.neuro[0].axon_weigh[i])): # for k in range(len(self.neuro[0].axon_weigh[i][j])): # count += 1 # self.neuro[0].axon_weigh[i][j][k] \ # = weigh[to_add + k + j * len(self.neuro[0].axon_weigh[i][j])] # # for w in range(1, len(self.spiders)): # high, weigh = self.excel.read(w) # self.neuro.append(Neuro()) # count = 0 # for i in range(len(self.neuro[w].axon_weigh)): # to_add = count # for j in range(len(self.neuro[w].axon_weigh[i])): # for k in 
range(len(self.neuro[w].axon_weigh[i][j])): # count += 1 # self.neuro[w].axon_weigh[i][j][k] \ # = weigh[to_add + k + j * len(self.neuro[w].axon_weigh[i][j])] # else: # for i in range(1, len(self.spiders)): # self.neuro.append(Neuro(mutant_power=1)) def __set_parameters_of_neuro(self): self.life_time = config.CYCLE_TIME self.count_of_alive = config.COUNT_OF_ALIVE self.mutation_power = config.MUTATION_POWER def __set_parameters_for_loop_work(self): self.do_next_step = True self.flag = True self.counter = 0 def start(self): while True: with b0RemoteApi.RemoteApiClient(self.python_client, self.remote_api) as self.client: self.__add_method() self.__add_objects() self.__start_simulation() self.__loop() self.__finish_simulation() self.__remake_neural_network() time.sleep(1) def __add_method(self): self.client.simxSynchronous(True) self.client.simxGetSimulationStepStarted(self.client.simxDefaultSubscriber(self.simulationStepStarted)) self.client.simxGetSimulationStepDone(self.client.simxDefaultSubscriber(self.simulationStepDone)) def __add_objects(self): #err_hand_cube, self.obj_hund_cube = self.client.simxGetObjectHandle('Cuboid', self.client.simxServiceCall()) for spider in self.spiders: spider.set_robot(self.client) def __start_simulation(self): self.client.simxStartSimulation(self.client.simxDefaultPublisher()) def __loop(self): while self.flag: if self.do_next_step: self.do_next_step = False self.client.simxSynchronousTrigger() self.client.simxSpinOnce() def __finish_simulation(self): self.client.simxStopSimulation(self.client.simxDefaultPublisher()) def __remake_neural_network(self): self.remake_neural() def remake_neural(self): self.counter = 0 self.flag = True # self.fitnes = [] # self.fitnes_radical = [] # for i in range(len(self.spiders)): # self.fitnes[i] += ((self.spiders[i].get_position()[1] + 10) / 20.0) * self.life_time # self.fitnes_radical[i] += (self.spiders[i].get_position()[1]) * 10 * self.life_time # if self.fitnes_radical[i] <= 0: 
self.fitnes_radical[i] = 0.001 # if self.fitnes[i] <= 0: self.fitnes[i] = 0.001 # print(self.fitnes[i], self.fitnes_radical[i]) # # # self.max = 0 # for i in range(1, len(self.spiders)): # if (self.fitnes[i]> self.fitnes[self.max]): # self.max = i # print(max, self.spiders[self.max].get_position()[1]) # print("best: ", max, "//", len(self.neuro)) # for i in range(len(neuro_best.axon_weigh)): # for j in range(len(neuro_best.axon_weigh[i])): # print(i, j, neuro_best.axon_weigh[i][j]) # self.__make_parents() # self.__make_who_not_die() # self.__make_new_population() # self.__make_mutation() # self.__save_to_db() def __make_parents(self): self.__roulette() def __tournament(self): pass def __roulette(self): index = [] for i in range(len(self.neuro)): index.append(i) print(index) self.index_father = choices(index, weights = self.fitnes_radical, k = 1)[0] self.index_mother = choices(index, weights = self.fitnes_radical, k = 1)[0] while self.index_father == self.index_mother: self.index_mother = choices(index, weights=self.fitnes_radical, k=1)[0] self.neuro_father = self.neuro[self.index_father] self.neuro_mother = self.neuro[self.index_mother] def __make_who_not_die(self): self.alive = [] index = [] for i in range(len(self.neuro)): index.append(i) print(index) self.alive.append(choices(index, weights = self.fitnes_radical, k = 1)[0]) for i in range(self.count_of_alive - 1): else_number = choices(index, weights = self.fitnes_radical, k = 1)[0] while else_number in self.alive: print("Same {}".format(else_number)) else_number = choices(index, weights=self.fitnes_radical, k=1)[0] print("new {}, all {}".format(else_number, self.alive)) self.alive.append(else_number) def __make_new_population(self): neuro_new = [] for i in range(self.count_of_alive): neuro_new.append(self.neuro[self.alive[i]]) self.neuro = neuro_new for i in range(self.count_of_alive, len(self.spiders)): if random.random() > 0.5: self.neuro.append(Neuro.randomize_new(self.neuro_father, self.neuro_mother)) else: 
self.neuro.append(Neuro.randomize_new(self.neuro_mother, self.neuro_father)) for i in range(len(self.spiders)): self.spiders[i].reset_position() def __make_mutation(self): for i in range(len(self.spiders)): self.neuro[i].make_mutation(self.mutation_power) print("Зроблена мутація") def __save_to_db(self): print("Почався запис в ексель") # self.excel.write_data2D_best(self.fitnes[self.max], self.neuro[self.max].axon_weigh) # self.excel.write_data2D_father(self.fitnes[self.index_father], self.neuro_father.axon_weigh) # self.excel.write_data2D_mother(self.fitnes[self.index_mother], self.neuro_mother.axon_weigh) dict_of_data = {"max fitnes" : self.fitnes[self.max]} for i in range(len(self.spiders)): dict_of_data["fit {}".format(i)] = self.fitnes[i] for i in range(len(self.spiders)): axon_line = self.neuro[i].axon_line() for j in range(len(axon_line)): dict_of_data["s{}a{}".format(i, j)] = axon_line[j] self.csv_manager.extend_row_by_dicts() self.csv_manager.write_sometimes() print("Завершився запис в ексель") def simulationStepStarted(self, msg): simTime = msg[1][b'simulationTime'] print('Simulation step started', simTime) counter = 0 normal_angle = (0, -1.5707963705062866, 0) normal_z = 0.088 for spider in self.spiders: spider.receive_position(self.client) self.fitnes[counter] += 5 + \ - abs(self.spiders[counter].get_rotation()[0] - normal_angle[0]) \ - abs(self.spiders[counter].get_rotation()[1] - normal_angle[2]) \ - abs(self.spiders[counter].get_rotation()[2] - normal_angle[2]) \ - 5 * abs(self.spiders[counter].get_position()[2] - normal_z) self.fitnes_radical[counter] += 5 + \ - 1.2 * abs(self.spiders[counter].get_rotation()[0] - normal_angle[0]) \ - 1.2 * abs(self.spiders[counter].get_rotation()[1] - normal_angle[2]) \ - 1.2 * abs(self.spiders[counter].get_rotation()[2] - normal_angle[2]) \ - 6 * abs(self.spiders[counter].get_position()[2] - normal_z) print("spin", self.spiders[counter].get_rotation(), self.fitnes[counter], self.fitnes_radical[counter]) counter += 
1 def simulationStepDone(self, msg): simTime = msg[1][b'simulationTime'] print('Simulation step done. Simulation time: ', simTime) for i in range(len(self.spiders)): self.spiders[i].move(self.client, output_data = self.neuro[i]._calculate(self.spiders[i].get_all())) self.do_next_step = True self.fitnes = [0] * len(self.spiders) self.fitnes_radical = [0] * len(self.spiders) self.__timer() def __timer(self): self.counter += 1 if self.counter > self.life_time: self.flag = False print(self.counter, "//", self.life_time)
41.964981
117
0.590357
from code.character import Character from code.spider import Spider from code.neuro import Neuro from code.excel import ExcelManager from code.csv_manager import CscManager import b0RemoteApi import code.config as config import time from random import choices import random class Scene: def __init__(self): self.__set_connetcion_variables_to_lib_files() self.__create_spiders() self.__create_or_connect_to_file() self.__create_neuro() self.__set_parameters_of_neuro() self.__set_parameters_for_loop_work() def __set_connetcion_variables_to_lib_files(self): self.python_client = 'b0RemoteApi_pythonClient' self.remote_api = 'b0RemoteApi_first' self.client = None def __create_spiders(self): self.spiders = [Spider()] for i in range(1, config.NUMBER_OF_SPIDERS): self.spiders.append(Spider("#{}".format(i - 1))) def __create_or_connect_to_file(self): self.csv_manager = CscManager(name=config.FILE_NAME) def __create_neuro(self): self.neuro = [] self.neuro_father = Neuro() self.neuro_mother = Neuro() self.neuro.append(Neuro()) self.fitnes = [0] * len(self.spiders) self.fitnes_radical = [0] * len(self.spiders) for i in range(1, len(self.spiders)): self.neuro.append(Neuro(mutant_power = 1)) def __set_parameters_of_neuro(self): self.life_time = config.CYCLE_TIME self.count_of_alive = config.COUNT_OF_ALIVE self.mutation_power = config.MUTATION_POWER def __set_parameters_for_loop_work(self): self.do_next_step = True self.flag = True self.counter = 0 def start(self): while True: with b0RemoteApi.RemoteApiClient(self.python_client, self.remote_api) as self.client: self.__add_method() self.__add_objects() self.__start_simulation() self.__loop() self.__finish_simulation() self.__remake_neural_network() time.sleep(1) def __add_method(self): self.client.simxSynchronous(True) self.client.simxGetSimulationStepStarted(self.client.simxDefaultSubscriber(self.simulationStepStarted)) self.client.simxGetSimulationStepDone(self.client.simxDefaultSubscriber(self.simulationStepDone)) def 
__add_objects(self): for spider in self.spiders: spider.set_robot(self.client) def __start_simulation(self): self.client.simxStartSimulation(self.client.simxDefaultPublisher()) def __loop(self): while self.flag: if self.do_next_step: self.do_next_step = False self.client.simxSynchronousTrigger() self.client.simxSpinOnce() def __finish_simulation(self): self.client.simxStopSimulation(self.client.simxDefaultPublisher()) def __remake_neural_network(self): self.remake_neural() def remake_neural(self): self.counter = 0 self.flag = True def __make_parents(self): self.__roulette() def __tournament(self): pass def __roulette(self): index = [] for i in range(len(self.neuro)): index.append(i) print(index) self.index_father = choices(index, weights = self.fitnes_radical, k = 1)[0] self.index_mother = choices(index, weights = self.fitnes_radical, k = 1)[0] while self.index_father == self.index_mother: self.index_mother = choices(index, weights=self.fitnes_radical, k=1)[0] self.neuro_father = self.neuro[self.index_father] self.neuro_mother = self.neuro[self.index_mother] def __make_who_not_die(self): self.alive = [] index = [] for i in range(len(self.neuro)): index.append(i) print(index) self.alive.append(choices(index, weights = self.fitnes_radical, k = 1)[0]) for i in range(self.count_of_alive - 1): else_number = choices(index, weights = self.fitnes_radical, k = 1)[0] while else_number in self.alive: print("Same {}".format(else_number)) else_number = choices(index, weights=self.fitnes_radical, k=1)[0] print("new {}, all {}".format(else_number, self.alive)) self.alive.append(else_number) def __make_new_population(self): neuro_new = [] for i in range(self.count_of_alive): neuro_new.append(self.neuro[self.alive[i]]) self.neuro = neuro_new for i in range(self.count_of_alive, len(self.spiders)): if random.random() > 0.5: self.neuro.append(Neuro.randomize_new(self.neuro_father, self.neuro_mother)) else: self.neuro.append(Neuro.randomize_new(self.neuro_mother, self.neuro_father)) 
for i in range(len(self.spiders)): self.spiders[i].reset_position() def __make_mutation(self): for i in range(len(self.spiders)): self.neuro[i].make_mutation(self.mutation_power) print("Зроблена мутація") def __save_to_db(self): print("Почався запис в ексель") dict_of_data = {"max fitnes" : self.fitnes[self.max]} for i in range(len(self.spiders)): dict_of_data["fit {}".format(i)] = self.fitnes[i] for i in range(len(self.spiders)): axon_line = self.neuro[i].axon_line() for j in range(len(axon_line)): dict_of_data["s{}a{}".format(i, j)] = axon_line[j] self.csv_manager.extend_row_by_dicts() self.csv_manager.write_sometimes() print("Завершився запис в ексель") def simulationStepStarted(self, msg): simTime = msg[1][b'simulationTime'] print('Simulation step started', simTime) counter = 0 normal_angle = (0, -1.5707963705062866, 0) normal_z = 0.088 for spider in self.spiders: spider.receive_position(self.client) self.fitnes[counter] += 5 + \ - abs(self.spiders[counter].get_rotation()[0] - normal_angle[0]) \ - abs(self.spiders[counter].get_rotation()[1] - normal_angle[2]) \ - abs(self.spiders[counter].get_rotation()[2] - normal_angle[2]) \ - 5 * abs(self.spiders[counter].get_position()[2] - normal_z) self.fitnes_radical[counter] += 5 + \ - 1.2 * abs(self.spiders[counter].get_rotation()[0] - normal_angle[0]) \ - 1.2 * abs(self.spiders[counter].get_rotation()[1] - normal_angle[2]) \ - 1.2 * abs(self.spiders[counter].get_rotation()[2] - normal_angle[2]) \ - 6 * abs(self.spiders[counter].get_position()[2] - normal_z) print("spin", self.spiders[counter].get_rotation(), self.fitnes[counter], self.fitnes_radical[counter]) counter += 1 def simulationStepDone(self, msg): simTime = msg[1][b'simulationTime'] print('Simulation step done. 
Simulation time: ', simTime) for i in range(len(self.spiders)): self.spiders[i].move(self.client, output_data = self.neuro[i]._calculate(self.spiders[i].get_all())) self.do_next_step = True self.fitnes = [0] * len(self.spiders) self.fitnes_radical = [0] * len(self.spiders) self.__timer() def __timer(self): self.counter += 1 if self.counter > self.life_time: self.flag = False print(self.counter, "//", self.life_time)
true
true
1c45f2188bc7857583ffee040cd434578399dbd7
24,516
py
Python
modules/webgrid2.py
dedebf/trilhas-poeticas-web2py-application
61b28a60143a8bdce84a9fd8511f6b4504a34f33
[ "MIT" ]
null
null
null
modules/webgrid2.py
dedebf/trilhas-poeticas-web2py-application
61b28a60143a8bdce84a9fd8511f6b4504a34f33
[ "MIT" ]
null
null
null
modules/webgrid2.py
dedebf/trilhas-poeticas-web2py-application
61b28a60143a8bdce84a9fd8511f6b4504a34f33
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ WebGrid for web2py Developed by Nathan Freeze (Copyright � 2009) Email <nathan@freezable.com> License: GPL v2 This file contains code to build a table that supports paging, sorting, editing and totals. """ ##################-----WEBGRID2 CRIADO PARA ALTERAR A FUNÇÃO page_total ONDE É COLOCADO IMAGEM AO INVEZ DE LINK NO----############## from gluon.sql import Rows, Field, Set from gluon.sqlhtml import * from gluon.html import * from gluon.storage import * class Webgrid2(object): def __init__(self, crud, name=None, datasource=None): self.crud = crud self.environment = crud.environment self.name = name self.css_prefix = None self.id = None self.datasource = datasource self.crud_function = 'data' self.download_function = 'download' self.messages = Messages(self.crud.environment.T) self.messages.confirm_delete = 'Are you sure?' self.messages.no_records = 'No records' self.messages.add_link = '[add %s]' self.messages.edit_link = 'edit' self.messages.delete_link = 'delete' self.messages.view_link = 'view' self.messages.file_link = 'anexo' self.messages.page_info = 'page %(pagenum)s of %(pagecount)s (total records: %(total)s)' self.messages.page_total = "Total:" self.messages.filter = 'Filtrar' self.messages.pagesize = ' pagesize: ' self.messages.previous_page = '<-prev-' self.messages.next_page = '-next->' self.action_links = ['view', 'edit', 'delete'] self.action_headers = ['view', 'edit', 'delete'] self.field_headers = self.fields = self.totals = [] self.enabled_rows = ['header', 'filter', 'pager', 'totals', 'footer', 'add_links'] self.allowed_vars = ['pagesize', 'pagenum', 'sortby', 'ascending', 'groupby', 'totals'] self.pagenum = self.pagecount = self.pagesize = 0 self.sortby = self.groupby = self.page_total = self.filters = None self.view_link = self.edit_link = self.delete_link = None self.add_links = self.action_header = None self.header = self.filter = self.footer = None self.pager = self.datarow = None 
self.pageinfo_separator = ' - ' self.pagesizes = [10,20,30,40,50] self.ascending = False self.row_created = None self.filter_query = lambda field,value: field==value self.filter_items_query = lambda field: field['id'] > 0 self.filter_cache = None self.total_function = lambda fieldvalues: sum(fieldvalues) def get_header(self, c): try: return self.field_headers[self.fields.index(c)] except: return c def get_value(self, f, r): (_t, _f) = f.split('.') v = r[_t][_f] if self.joined else r[_f] return v def update_filters(self,vrs,flt): if not flt: return for k, v in flt.items(): vrs[self.name + '_filter-'+k] = v def __call__(self): request = self.crud.environment.request db = self.crud.db datasource = self.datasource if not self.name: self.name = self.crud.environment.request.function if not self.css_prefix: self.css_prefix = self.name if not self.id: self.id = self.name # Set defaults vars = request.get_vars allowed = self.allowed_vars name = self.name if getattr(vars,name+'_pagesize') and 'pagesize' in allowed: self.pagesize = int(vars[name+'_pagesize']) if not self.pagesize: self.pagesize = 10 if getattr(vars,name+'_pagenum') and 'pagenum' in allowed: self.pagenum = int(vars[name+'_pagenum']) if not self.pagenum: self.pagenum = 1 if getattr(vars,name+'_sortby') and 'sortby' in allowed: self.sortby = vars[name+'_sortby'] if getattr(vars,name+'_groupby') and 'groupby' in allowed: self.groupby = vars[name+'_groupby'] if getattr(vars,name+'_totals') and 'totals' in allowed: self.totals = vars[name+'_totals'] if getattr(vars,name+'_ascending') and 'ascending' in allowed: self.ascending = vars[name+'_ascending'] == "True" page = sortby = groupby = query = None filters = dict() #Build filters if 'filter' in self.enabled_rows: if request.post_vars: request.vars.update(request.post_vars) for k, v in request.vars.items(): if isinstance(v,list): v = v[0] if name + '_filter-' in k: tf = k.split('-')[-1] filters[tf] = v for k, v in filters.items(): if v=='0': continue (ft,ff) = 
k.split('.') fld = db[ft][ff] if query: query &= self.filter_query(fld,v) else: query = self.filter_query(fld,v) if filters and request.vars.get(name+'_submit_filter'): self.pagenum = 1 # Build limitby if self.pagesize > 0: pagenum = self.pagenum - 1 page = (self.pagesize * pagenum, self.pagesize * pagenum + self.pagesize) else: self.pagenum = 0 # Build sortby if self.sortby: if isinstance(self.sortby, Field): (ts, fs) = (self.sortby._tablename, self.sortby.name) else: (ts, fs) = self.sortby.split('.') if self.ascending: sortby = db[ts][fs] else: sortby = ~db[ts][fs] if self.groupby: if isinstance(self.groupby, Field): (tg, fg) = (self.groupby._tablename, self.groupby.name) else: (tg, fg) = self.groupby.split('.') groupby = db[tg][fg] # Get rows rows = total = None if isinstance(datasource, Rows): rows = datasource joined = len(set(map(lambda c: c.split('.')[0], rows.colnames))) > 1 for k,v in filters.items(): if v=='0': continue (flt_t,flt_f) = k.split('.') if joined: rows = rows.find(lambda row: row[flt_t][flt_f]==v) else: rows = rows.find(lambda row: row[flt_f]==v) total = len(rows) if sortby and joined: rows = rows.sort(lambda row: row[ts][fs], reverse=self.ascending) elif sortby: rows = rows.sort(lambda row: row[fs], reverse=self.ascending) if self.pagesize > 0: rows = rows[page[0]:page[1]] elif isinstance(datasource, Set): if query: datasource = datasource(query) id_in_fields = [f for f in self.fields if f.split('.')[-1] == 'id'] idfield = self.fields[0].split('.')[0] +'.id' if not id_in_fields: self.fields.append(idfield) rows = datasource.select(limitby=page, orderby=sortby, groupby=groupby, *self.fields) if not id_in_fields: self.fields.remove(idfield) total = datasource.count() elif isinstance(datasource, Table): rows = db(query).select(datasource.ALL, limitby=page, orderby=sortby, groupby=groupby) total = db(datasource.id > 0).count() elif isinstance(datasource, list) and isinstance(datasource[0], Table): rows = db(query).select(limitby=page, 
orderby=sortby, groupby=groupby, *[t.ALL for t in datasource]) total = db(datasource[0].id > 0).count() else: raise AttributeError("Invalid datasource for WebGrid") self.tablenames = list(set(map(lambda c: c.split('.')[0], rows.colnames))) joined = len(self.tablenames) > 1 self.response = rows self.colnames = rows.colnames self.joined = joined self.total = total if not self.fields: self.fields = rows.colnames if isinstance(self.fields[0], Field): self.fields = ['%s.%s' % (f._tablename, f.name) for f in self.fields] if self.filters and isinstance(self.filters[0],Field): self.filters = ['%s.%s' % (f._tablename, f.name) for f in self.filters] if self.totals and isinstance(self.totals[0], Field): self.totals = ['%s.%s' % (f._tablename, f.name) for f in self.totals] if not self.filters: self.filters = self.fields if not self.field_headers: self.field_headers = [] for f in self.fields: (t,f) = f.split('.') field = db[t][f] if hasattr(field,'label'): self.field_headers.append(field.label) else: lbl = f.split('.')[1].replace("_", " ").capitalize() self.field_headers.append(lbl) if not self.action_headers: self.action_headers = self.action_links if not self.view_link and 'view' in self.action_links: self.view_link = lambda row: A(self.messages.view_link, _href=self.crud.url(f=self.crud_function, args=['read', self.tablenames[0], row[self.tablenames[0]]['id'] \ if self.joined else row['id']])) if not self.edit_link and 'edit' in self.action_links: self.edit_link = lambda row: A(self.messages.edit_link, _href=self.crud.url(f=self.crud_function, args=['update', self.tablenames[0], row[self.tablenames[0]]['id'] \ if self.joined else row['id']])) if not self.delete_link and 'delete' in self.action_links: self.delete_link = lambda row: A(self.messages.delete_link, _href=self.crud.url(f=self.crud_function, args=['delete', self.tablenames[0], row[self.tablenames[0]]['id'] \ if self.joined else row['id']]), _onclick="return confirm('%s');" % \ self.messages.confirm_delete) if not 
self.add_links and 'add_links' in self.enabled_rows: self.add_links = lambda tables: TR(TD([A(self.messages.add_link % t, _href=self.crud.url(f=self.crud_function, args=['create', t])) for t in self.tablenames], _colspan=len(self.action_headers)+ len(self.field_headers)), _class='-webgrid add_links') if not self.header and 'header' in self.enabled_rows: def header(fields): thead = TR([TH(c) for c in self.action_headers], _class='-webgrid header') for f in fields: vars = dict(request.get_vars) self.update_filters(vars,filters) vars[name+'_pagenum'] = 1 vars[name+'_sortby'] = f vars[name+'_ascending'] = not self.ascending href = URL(r=request,vars=vars,args=request.args) th = TH(A(self.get_header(f),_href=href)) thead.components.append(th) return thead self.header = header if not self.filter and 'filter' in self.enabled_rows: def filter(fields): tr = TR([TD('') for c in self.action_links], _class='-webgrid filter') if self.action_links: tr.components[-1] = TD(INPUT(_type='submit', _value=self.messages.filter, _name=name+'_submit_filter', _class="btn-sm")) #fix this for f in fields: if not f in self.filters: tr.components.append(TD('')) continue (tf,ff) = f.split('.') curfld = db[tf][ff] if curfld.type=='upload' or curfld.type=='blob': continue vals = db(self.filter_items_query(db[tf])).select(db[tf]['id'],curfld, cache=self.filter_cache) dval = filters.get(f) prev = [] opts = [] for v in vals: opt = None if curfld.type.startswith('reference '): if curfld.represent: rp = curfld.represent(v[ff]) if rp and not rp in prev: opt = OPTION(rp, _value=v[ff]) prev.append(rp) else: v = v[ff] if v and not v in prev: opt = OPTION(v,_value=v) prev.append(v) elif curfld.represent: rp = curfld.represent(v[ff]) if rp and not rp in prev: opt = OPTION(rp, _value=rp) prev.append(rp) else: if v[ff] and not v[ff] in prev: opt = OPTION(v[ff], _value=v[ff]) prev.append(v[ff]) if opt: opts.append(opt) opts.sort(key=lambda x: x.components[0]) inp = SELECT(opts, _name = 
name+'_filter-'+f,value=dval) inp.components.insert(0,OPTION('',_value='0')) tr.components.append(TD(inp)) return tr self.filter = filter if not self.footer and 'footer' in self.enabled_rows: def footer(fields): pageinfo = pagesize = '' pagelinks = SPAN(self.messages.pagesize) if not self.groupby: vars = dict(request.get_vars) self.update_filters(vars,filters) for p in self.pagesizes: vars[name+'_pagesize'] = p vars[name+'_pagenum'] = 1 lnk = A(str(p),' ',_href=URL(r=request,args=request.args, vars=vars)) pagelinks.components.append(lnk) pageinfo = self.messages.page_info % {'pagenum':self.pagenum, 'pagecount':self.pagecount, 'total':self.total} tr = TR(_class='-webgrid footer') td = TD(pageinfo,self.pageinfo_separator,pagelinks, _colspan=len(self.fields) + len(self.action_links)) tr.components.append(td) return tr self.footer = footer if not self.pager and 'pager' in self.enabled_rows: def pager(pagecount): vars = dict(request.get_vars) self.update_filters(vars,filters) prev = A(self.messages.previous_page, _href="#") next = A(self.messages.next_page, _href="#") if self.pagesize > 0 and pagenum > 0: vars[name+'_pagenum'] = self.pagenum - 1 prev = A(B(self.messages.previous_page), _href=URL(r=request,vars=vars,args=request.args)) if self.pagesize > 0 and self.pagenum < pagecount and \ len(self.response) >= self.pagesize: vars[name+'_pagenum'] = self.pagenum + 1 next = A(B(self.messages.next_page), _href=URL(r=request,vars=vars,args=request.args)) tr = TR(_class='-webgrid pager') td = TD(prev,_colspan=len(self.fields) + len(self.action_links) ) for x in xrange(1, pagecount + 1): if not self.groupby: vars[name+'_pagenum'] = x href = URL(r=request,vars=vars,args=request.args) td.components.append(A(x,'-',_href=href)) td.components.append(next) tr.components.append(td) return tr self.pager = pager if not self.page_total and 'totals' in self.enabled_rows: def page_total(): pagetotal = TR(['' for l in self.action_links], _class='-webgrid totals') if self.action_links: 
pagetotal.components[-1] = TD(self.messages.page_total) for f in self.fields: if f in self.totals: fieldvalues = [self.get_value(f, r) for r in self.response] fieldtotal = self.total_function(fieldvalues) pagetotal.components.append(TD(fieldtotal)) else: pagetotal.components.append(TD()) return pagetotal self.page_total = page_total if not self.action_links: if self.totals or self.filters: self.action_links = ['delete'] self.action_headers = [''] self.delete_link = lambda row: ' ' table_field = re.compile('[\w_]+\.[\w_]+') table = TABLE(_id=self.id, _class="table-striped") if 'header' in self.enabled_rows: _row = self.header(self.fields) if self.row_created: self.row_created(_row,'header',None) table.components.append(THEAD(_row)) if 'filter' in self.enabled_rows: _row = self.filter(self.fields) if self.row_created: self.row_created(_row,'filter',None) table.components.append(_row) if len(rows) == 0: table.components.append(TR(TD(self.messages.no_records, _colspan=len(self.fields) + len(self.action_links), _style="text-align:center;"))) for (rc, row) in enumerate(rows): if self.datarow: _row = self.datarow(row) if self.row_created: self.row_created(_row,'datarow',row) table.components.append(_row) continue _class = 'even' if rc % 2 == 0 else 'odd' tr = TR(_class='-webgrid-row %s' % _class) if 'view' in self.action_links: tr.components.append(TD(self.view_link(row), _class='-webgrid view_link')) if 'edit' in self.action_links: tr.components.append(TD(self.edit_link(row), _class='-webgrid edit_link')) if 'delete' in self.action_links: tr.components.append(TD(self.delete_link(row), _class='-webgrid delete_link')) for colname in self.fields: if not table_field.match(colname): r = row._extra[colname] tr.components.append(TD(r)) continue (tablename, fieldname) = colname.split('.') field = rows.db[tablename][fieldname] r = row[tablename][fieldname] if joined else row[fieldname] if field.represent: r = field.represent(r) tr.components.append(TD(r)) continue if field.type 
== 'blob' and r: tr.components.append(TD('DATA')) continue r = str(field.formatter(r)) if field.type == 'upload': if r: tr.components.append(TD(A(IMG(_src=URL(r=self.environment.request, f=self.download_function, args=r), _width='82px'), _href=URL(r=self.environment.request, f=self.download_function, args=r)))) else: tr.components.append(TD(self.messages.file_link)) continue tr.components.append(TD(r)) if self.row_created: self.row_created(tr,'datarow',row) table.components.append(tr) if self.pagesize > 0: pagecount = int(total / self.pagesize) if total % self.pagesize != 0: pagecount += 1 else: pagecount = 1 self.pagecount = pagecount footer_wrap = TFOOT() if 'totals' in self.enabled_rows and len(rows): _row = self.page_total() if self.row_created: self.row_created(_row,'totals',None) footer_wrap.components.append(_row) if 'add_links' in self.enabled_rows: _row = self.add_links(self.tablenames) if self.row_created: self.row_created(_row,'add_links',None) footer_wrap.components.append(_row) if 'pager' in self.enabled_rows and len(rows): _row = self.pager(pagecount) if self.row_created: self.row_created(_row,'pager',None) footer_wrap.components.append(_row) if 'footer' in self.enabled_rows and len(rows): _row = self.footer(self.fields) if self.row_created: self.row_created(_row,'footer',None) footer_wrap.components.append(_row) table.components.append(footer_wrap) return FORM(table,_class='webgrid',_name=name+'-webgrid-form') def links_right(tablerow,rowtype,rowdata): if rowtype != 'pager': links = tablerow.components[:3] del tablerow.components[:3] tablerow.components.extend(links)
45.653631
132
0.465206
=value self.filter_items_query = lambda field: field['id'] > 0 self.filter_cache = None self.total_function = lambda fieldvalues: sum(fieldvalues) def get_header(self, c): try: return self.field_headers[self.fields.index(c)] except: return c def get_value(self, f, r): (_t, _f) = f.split('.') v = r[_t][_f] if self.joined else r[_f] return v def update_filters(self,vrs,flt): if not flt: return for k, v in flt.items(): vrs[self.name + '_filter-'+k] = v def __call__(self): request = self.crud.environment.request db = self.crud.db datasource = self.datasource if not self.name: self.name = self.crud.environment.request.function if not self.css_prefix: self.css_prefix = self.name if not self.id: self.id = self.name vars = request.get_vars allowed = self.allowed_vars name = self.name if getattr(vars,name+'_pagesize') and 'pagesize' in allowed: self.pagesize = int(vars[name+'_pagesize']) if not self.pagesize: self.pagesize = 10 if getattr(vars,name+'_pagenum') and 'pagenum' in allowed: self.pagenum = int(vars[name+'_pagenum']) if not self.pagenum: self.pagenum = 1 if getattr(vars,name+'_sortby') and 'sortby' in allowed: self.sortby = vars[name+'_sortby'] if getattr(vars,name+'_groupby') and 'groupby' in allowed: self.groupby = vars[name+'_groupby'] if getattr(vars,name+'_totals') and 'totals' in allowed: self.totals = vars[name+'_totals'] if getattr(vars,name+'_ascending') and 'ascending' in allowed: self.ascending = vars[name+'_ascending'] == "True" page = sortby = groupby = query = None filters = dict() if 'filter' in self.enabled_rows: if request.post_vars: request.vars.update(request.post_vars) for k, v in request.vars.items(): if isinstance(v,list): v = v[0] if name + '_filter-' in k: tf = k.split('-')[-1] filters[tf] = v for k, v in filters.items(): if v=='0': continue (ft,ff) = k.split('.') fld = db[ft][ff] if query: query &= self.filter_query(fld,v) else: query = self.filter_query(fld,v) if filters and request.vars.get(name+'_submit_filter'): self.pagenum = 1 if 
self.pagesize > 0: pagenum = self.pagenum - 1 page = (self.pagesize * pagenum, self.pagesize * pagenum + self.pagesize) else: self.pagenum = 0 if self.sortby: if isinstance(self.sortby, Field): (ts, fs) = (self.sortby._tablename, self.sortby.name) else: (ts, fs) = self.sortby.split('.') if self.ascending: sortby = db[ts][fs] else: sortby = ~db[ts][fs] if self.groupby: if isinstance(self.groupby, Field): (tg, fg) = (self.groupby._tablename, self.groupby.name) else: (tg, fg) = self.groupby.split('.') groupby = db[tg][fg] rows = total = None if isinstance(datasource, Rows): rows = datasource joined = len(set(map(lambda c: c.split('.')[0], rows.colnames))) > 1 for k,v in filters.items(): if v=='0': continue (flt_t,flt_f) = k.split('.') if joined: rows = rows.find(lambda row: row[flt_t][flt_f]==v) else: rows = rows.find(lambda row: row[flt_f]==v) total = len(rows) if sortby and joined: rows = rows.sort(lambda row: row[ts][fs], reverse=self.ascending) elif sortby: rows = rows.sort(lambda row: row[fs], reverse=self.ascending) if self.pagesize > 0: rows = rows[page[0]:page[1]] elif isinstance(datasource, Set): if query: datasource = datasource(query) id_in_fields = [f for f in self.fields if f.split('.')[-1] == 'id'] idfield = self.fields[0].split('.')[0] +'.id' if not id_in_fields: self.fields.append(idfield) rows = datasource.select(limitby=page, orderby=sortby, groupby=groupby, *self.fields) if not id_in_fields: self.fields.remove(idfield) total = datasource.count() elif isinstance(datasource, Table): rows = db(query).select(datasource.ALL, limitby=page, orderby=sortby, groupby=groupby) total = db(datasource.id > 0).count() elif isinstance(datasource, list) and isinstance(datasource[0], Table): rows = db(query).select(limitby=page, orderby=sortby, groupby=groupby, *[t.ALL for t in datasource]) total = db(datasource[0].id > 0).count() else: raise AttributeError("Invalid datasource for WebGrid") self.tablenames = list(set(map(lambda c: c.split('.')[0], rows.colnames))) 
joined = len(self.tablenames) > 1 self.response = rows self.colnames = rows.colnames self.joined = joined self.total = total if not self.fields: self.fields = rows.colnames if isinstance(self.fields[0], Field): self.fields = ['%s.%s' % (f._tablename, f.name) for f in self.fields] if self.filters and isinstance(self.filters[0],Field): self.filters = ['%s.%s' % (f._tablename, f.name) for f in self.filters] if self.totals and isinstance(self.totals[0], Field): self.totals = ['%s.%s' % (f._tablename, f.name) for f in self.totals] if not self.filters: self.filters = self.fields if not self.field_headers: self.field_headers = [] for f in self.fields: (t,f) = f.split('.') field = db[t][f] if hasattr(field,'label'): self.field_headers.append(field.label) else: lbl = f.split('.')[1].replace("_", " ").capitalize() self.field_headers.append(lbl) if not self.action_headers: self.action_headers = self.action_links if not self.view_link and 'view' in self.action_links: self.view_link = lambda row: A(self.messages.view_link, _href=self.crud.url(f=self.crud_function, args=['read', self.tablenames[0], row[self.tablenames[0]]['id'] \ if self.joined else row['id']])) if not self.edit_link and 'edit' in self.action_links: self.edit_link = lambda row: A(self.messages.edit_link, _href=self.crud.url(f=self.crud_function, args=['update', self.tablenames[0], row[self.tablenames[0]]['id'] \ if self.joined else row['id']])) if not self.delete_link and 'delete' in self.action_links: self.delete_link = lambda row: A(self.messages.delete_link, _href=self.crud.url(f=self.crud_function, args=['delete', self.tablenames[0], row[self.tablenames[0]]['id'] \ if self.joined else row['id']]), _onclick="return confirm('%s');" % \ self.messages.confirm_delete) if not self.add_links and 'add_links' in self.enabled_rows: self.add_links = lambda tables: TR(TD([A(self.messages.add_link % t, _href=self.crud.url(f=self.crud_function, args=['create', t])) for t in self.tablenames], 
_colspan=len(self.action_headers)+ len(self.field_headers)), _class='-webgrid add_links') if not self.header and 'header' in self.enabled_rows: def header(fields): thead = TR([TH(c) for c in self.action_headers], _class='-webgrid header') for f in fields: vars = dict(request.get_vars) self.update_filters(vars,filters) vars[name+'_pagenum'] = 1 vars[name+'_sortby'] = f vars[name+'_ascending'] = not self.ascending href = URL(r=request,vars=vars,args=request.args) th = TH(A(self.get_header(f),_href=href)) thead.components.append(th) return thead self.header = header if not self.filter and 'filter' in self.enabled_rows: def filter(fields): tr = TR([TD('') for c in self.action_links], _class='-webgrid filter') if self.action_links: tr.components[-1] = TD(INPUT(_type='submit', _value=self.messages.filter, _name=name+'_submit_filter', _class="btn-sm")) for f in fields: if not f in self.filters: tr.components.append(TD('')) continue (tf,ff) = f.split('.') curfld = db[tf][ff] if curfld.type=='upload' or curfld.type=='blob': continue vals = db(self.filter_items_query(db[tf])).select(db[tf]['id'],curfld, cache=self.filter_cache) dval = filters.get(f) prev = [] opts = [] for v in vals: opt = None if curfld.type.startswith('reference '): if curfld.represent: rp = curfld.represent(v[ff]) if rp and not rp in prev: opt = OPTION(rp, _value=v[ff]) prev.append(rp) else: v = v[ff] if v and not v in prev: opt = OPTION(v,_value=v) prev.append(v) elif curfld.represent: rp = curfld.represent(v[ff]) if rp and not rp in prev: opt = OPTION(rp, _value=rp) prev.append(rp) else: if v[ff] and not v[ff] in prev: opt = OPTION(v[ff], _value=v[ff]) prev.append(v[ff]) if opt: opts.append(opt) opts.sort(key=lambda x: x.components[0]) inp = SELECT(opts, _name = name+'_filter-'+f,value=dval) inp.components.insert(0,OPTION('',_value='0')) tr.components.append(TD(inp)) return tr self.filter = filter if not self.footer and 'footer' in self.enabled_rows: def footer(fields): pageinfo = pagesize = '' 
pagelinks = SPAN(self.messages.pagesize) if not self.groupby: vars = dict(request.get_vars) self.update_filters(vars,filters) for p in self.pagesizes: vars[name+'_pagesize'] = p vars[name+'_pagenum'] = 1 lnk = A(str(p),' ',_href=URL(r=request,args=request.args, vars=vars)) pagelinks.components.append(lnk) pageinfo = self.messages.page_info % {'pagenum':self.pagenum, 'pagecount':self.pagecount, 'total':self.total} tr = TR(_class='-webgrid footer') td = TD(pageinfo,self.pageinfo_separator,pagelinks, _colspan=len(self.fields) + len(self.action_links)) tr.components.append(td) return tr self.footer = footer if not self.pager and 'pager' in self.enabled_rows: def pager(pagecount): vars = dict(request.get_vars) self.update_filters(vars,filters) prev = A(self.messages.previous_page, _href="#") next = A(self.messages.next_page, _href="#") if self.pagesize > 0 and pagenum > 0: vars[name+'_pagenum'] = self.pagenum - 1 prev = A(B(self.messages.previous_page), _href=URL(r=request,vars=vars,args=request.args)) if self.pagesize > 0 and self.pagenum < pagecount and \ len(self.response) >= self.pagesize: vars[name+'_pagenum'] = self.pagenum + 1 next = A(B(self.messages.next_page), _href=URL(r=request,vars=vars,args=request.args)) tr = TR(_class='-webgrid pager') td = TD(prev,_colspan=len(self.fields) + len(self.action_links) ) for x in xrange(1, pagecount + 1): if not self.groupby: vars[name+'_pagenum'] = x href = URL(r=request,vars=vars,args=request.args) td.components.append(A(x,'-',_href=href)) td.components.append(next) tr.components.append(td) return tr self.pager = pager if not self.page_total and 'totals' in self.enabled_rows: def page_total(): pagetotal = TR(['' for l in self.action_links], _class='-webgrid totals') if self.action_links: pagetotal.components[-1] = TD(self.messages.page_total) for f in self.fields: if f in self.totals: fieldvalues = [self.get_value(f, r) for r in self.response] fieldtotal = self.total_function(fieldvalues) 
pagetotal.components.append(TD(fieldtotal)) else: pagetotal.components.append(TD()) return pagetotal self.page_total = page_total if not self.action_links: if self.totals or self.filters: self.action_links = ['delete'] self.action_headers = [''] self.delete_link = lambda row: ' ' table_field = re.compile('[\w_]+\.[\w_]+') table = TABLE(_id=self.id, _class="table-striped") if 'header' in self.enabled_rows: _row = self.header(self.fields) if self.row_created: self.row_created(_row,'header',None) table.components.append(THEAD(_row)) if 'filter' in self.enabled_rows: _row = self.filter(self.fields) if self.row_created: self.row_created(_row,'filter',None) table.components.append(_row) if len(rows) == 0: table.components.append(TR(TD(self.messages.no_records, _colspan=len(self.fields) + len(self.action_links), _style="text-align:center;"))) for (rc, row) in enumerate(rows): if self.datarow: _row = self.datarow(row) if self.row_created: self.row_created(_row,'datarow',row) table.components.append(_row) continue _class = 'even' if rc % 2 == 0 else 'odd' tr = TR(_class='-webgrid-row %s' % _class) if 'view' in self.action_links: tr.components.append(TD(self.view_link(row), _class='-webgrid view_link')) if 'edit' in self.action_links: tr.components.append(TD(self.edit_link(row), _class='-webgrid edit_link')) if 'delete' in self.action_links: tr.components.append(TD(self.delete_link(row), _class='-webgrid delete_link')) for colname in self.fields: if not table_field.match(colname): r = row._extra[colname] tr.components.append(TD(r)) continue (tablename, fieldname) = colname.split('.') field = rows.db[tablename][fieldname] r = row[tablename][fieldname] if joined else row[fieldname] if field.represent: r = field.represent(r) tr.components.append(TD(r)) continue if field.type == 'blob' and r: tr.components.append(TD('DATA')) continue r = str(field.formatter(r)) if field.type == 'upload': if r: tr.components.append(TD(A(IMG(_src=URL(r=self.environment.request, 
f=self.download_function, args=r), _width='82px'), _href=URL(r=self.environment.request, f=self.download_function, args=r)))) else: tr.components.append(TD(self.messages.file_link)) continue tr.components.append(TD(r)) if self.row_created: self.row_created(tr,'datarow',row) table.components.append(tr) if self.pagesize > 0: pagecount = int(total / self.pagesize) if total % self.pagesize != 0: pagecount += 1 else: pagecount = 1 self.pagecount = pagecount footer_wrap = TFOOT() if 'totals' in self.enabled_rows and len(rows): _row = self.page_total() if self.row_created: self.row_created(_row,'totals',None) footer_wrap.components.append(_row) if 'add_links' in self.enabled_rows: _row = self.add_links(self.tablenames) if self.row_created: self.row_created(_row,'add_links',None) footer_wrap.components.append(_row) if 'pager' in self.enabled_rows and len(rows): _row = self.pager(pagecount) if self.row_created: self.row_created(_row,'pager',None) footer_wrap.components.append(_row) if 'footer' in self.enabled_rows and len(rows): _row = self.footer(self.fields) if self.row_created: self.row_created(_row,'footer',None) footer_wrap.components.append(_row) table.components.append(footer_wrap) return FORM(table,_class='webgrid',_name=name+'-webgrid-form') def links_right(tablerow,rowtype,rowdata): if rowtype != 'pager': links = tablerow.components[:3] del tablerow.components[:3] tablerow.components.extend(links)
true
true
1c45f5a3f7513812d906fa04e329c3a1e9236159
1,065
py
Python
edmunds/foundation/concerns/serviceproviders.py
LowieHuyghe/edmunds-python
236d087746cb8802a8854b2706b8d3ff009e9209
[ "Apache-2.0" ]
4
2017-09-07T13:39:50.000Z
2018-05-31T16:14:50.000Z
edmunds/foundation/concerns/serviceproviders.py
LowieHuyghe/edmunds-python
236d087746cb8802a8854b2706b8d3ff009e9209
[ "Apache-2.0" ]
103
2017-03-19T15:58:21.000Z
2018-07-11T20:36:17.000Z
edmunds/foundation/concerns/serviceproviders.py
LowieHuyghe/edmunds-python
236d087746cb8802a8854b2706b8d3ff009e9209
[ "Apache-2.0" ]
2
2017-10-14T15:20:11.000Z
2018-04-20T09:55:44.000Z
from threading import Lock class ServiceProviders(object): """ This class concerns service providers code for Application to extend from """ def register(self, class_): """ Register a Service Provider :param class_: The class of the provider :type class_: ServiceProvider """ lock_key = 'edmunds.serviceprovider.lock' providers_key = 'edmunds.serviceprovider.providers' # Register the lock if lock_key not in self.extensions: self.extensions[lock_key] = Lock() # Define list to register providers if providers_key not in self.extensions: with self.extensions[lock_key]: if providers_key not in self.extensions: self.extensions[providers_key] = [] # Only register a provider once if class_ in self.extensions[providers_key]: return self.extensions[providers_key].append(class_) service_provider = class_(self) service_provider.register()
28.783784
77
0.629108
from threading import Lock class ServiceProviders(object): def register(self, class_): lock_key = 'edmunds.serviceprovider.lock' providers_key = 'edmunds.serviceprovider.providers' if lock_key not in self.extensions: self.extensions[lock_key] = Lock() if providers_key not in self.extensions: with self.extensions[lock_key]: if providers_key not in self.extensions: self.extensions[providers_key] = [] if class_ in self.extensions[providers_key]: return self.extensions[providers_key].append(class_) service_provider = class_(self) service_provider.register()
true
true
1c45f686a688b7c613282c5f90a0c54d646b4457
4,988
py
Python
mmdet/distillation/distillers/csd_distiller.py
Senwang98/Lightweight-Detection-and-KD
7d6a4c02d922d4ed0920c9108f1f06dd63c5e90b
[ "Apache-2.0" ]
8
2021-12-28T02:47:16.000Z
2022-03-28T13:13:49.000Z
mmdet/distillation/distillers/csd_distiller.py
Senwang98/Lightweight-Detection-and-KD
7d6a4c02d922d4ed0920c9108f1f06dd63c5e90b
[ "Apache-2.0" ]
1
2022-03-29T10:52:49.000Z
2022-03-31T01:28:01.000Z
mmdet/distillation/distillers/csd_distiller.py
Senwang98/Lightweight-Detection-and-KD
7d6a4c02d922d4ed0920c9108f1f06dd63c5e90b
[ "Apache-2.0" ]
null
null
null
import torch.nn as nn import torch.nn.functional as F import torch from mmdet.models.detectors.base import BaseDetector from mmdet.models import build_detector from mmcv.runner import load_checkpoint, _load_checkpoint, load_state_dict from ..builder import DISTILLER, build_distill_loss from collections import OrderedDict @DISTILLER.register_module() class CSD_DetectionDistiller(BaseDetector): """Base distiller for detectors. It typically consists of teacher_model and student_model. """ def __init__(self, teacher_cfg, student_cfg, distill_cfg=None, teacher_pretrained=None, init_student=False): super(CSD_DetectionDistiller, self).__init__() self.teacher = build_detector(teacher_cfg.model, train_cfg=teacher_cfg.get('train_cfg'), test_cfg=teacher_cfg.get('test_cfg')) self.init_weights_teacher(teacher_pretrained) self.teacher.eval() self.student = build_detector(student_cfg.model, train_cfg=student_cfg.get('train_cfg'), test_cfg=student_cfg.get('test_cfg')) # inheriting strategy if init_student: t_checkpoint = _load_checkpoint(teacher_pretrained) all_name = [] for name, v in t_checkpoint["state_dict"].items(): if name.startswith("backbone."): continue else: all_name.append((name, v)) state_dict = OrderedDict(all_name) load_state_dict(self.student, state_dict) self.distill_losses = nn.ModuleDict() self.distill_cfg = distill_cfg for item_loc in distill_cfg: for item_loss in item_loc.methods: loss_name = item_loss.name self.distill_losses[loss_name] = build_distill_loss(item_loss) def base_parameters(self): return nn.ModuleList([self.student, self.distill_losses]) @property def with_neck(self): """bool: whether the detector has a neck""" return hasattr(self.student, 'neck') and self.student.neck is not None @property def with_shared_head(self): """bool: whether the detector has a shared head in the RoI Head""" return hasattr(self.student, 'roi_head') and self.student.roi_head.with_shared_head @property def with_bbox(self): """bool: whether the detector has a bbox head""" 
return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_bbox) or (hasattr(self.student, 'bbox_head') and self.student.bbox_head is not None)) @property def with_mask(self): """bool: whether the detector has a mask head""" return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_mask) or (hasattr(self.student, 'mask_head') and self.student.mask_head is not None)) def init_weights_teacher(self, path=None): """Load the pretrained model in teacher detector. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ checkpoint = load_checkpoint(self.teacher, path, map_location='cpu') def forward_train(self, img, img_metas, **kwargs): """ Args: img (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. Returns: dict[str, Tensor]: A dictionary of loss components(student's losses and distiller's losses). """ with torch.no_grad(): self.teacher.eval() fea_t = self.teacher.extract_feat(img) student_feat = self.student.extract_feat(img) student_loss = self.student.bbox_head.forward_train( student_feat, img_metas, **kwargs) for i in range(len(student_feat)): loss_name = 'loss_csd_fpn_'+str(i) student_loss[loss_name] = self.distill_losses[loss_name]( student_feat[i], fea_t[i].detach(), kwargs['gt_bboxes'], img_metas) return student_loss def simple_test(self, img, img_metas, **kwargs): return self.student.simple_test(img, img_metas, **kwargs) def aug_test(self, imgs, img_metas, **kwargs): return self.student.aug_test(imgs, img_metas, **kwargs) def extract_feat(self, imgs): """Extract features from images.""" return self.student.extract_feat(imgs)
38.666667
104
0.621893
import torch.nn as nn import torch.nn.functional as F import torch from mmdet.models.detectors.base import BaseDetector from mmdet.models import build_detector from mmcv.runner import load_checkpoint, _load_checkpoint, load_state_dict from ..builder import DISTILLER, build_distill_loss from collections import OrderedDict @DISTILLER.register_module() class CSD_DetectionDistiller(BaseDetector): def __init__(self, teacher_cfg, student_cfg, distill_cfg=None, teacher_pretrained=None, init_student=False): super(CSD_DetectionDistiller, self).__init__() self.teacher = build_detector(teacher_cfg.model, train_cfg=teacher_cfg.get('train_cfg'), test_cfg=teacher_cfg.get('test_cfg')) self.init_weights_teacher(teacher_pretrained) self.teacher.eval() self.student = build_detector(student_cfg.model, train_cfg=student_cfg.get('train_cfg'), test_cfg=student_cfg.get('test_cfg')) if init_student: t_checkpoint = _load_checkpoint(teacher_pretrained) all_name = [] for name, v in t_checkpoint["state_dict"].items(): if name.startswith("backbone."): continue else: all_name.append((name, v)) state_dict = OrderedDict(all_name) load_state_dict(self.student, state_dict) self.distill_losses = nn.ModuleDict() self.distill_cfg = distill_cfg for item_loc in distill_cfg: for item_loss in item_loc.methods: loss_name = item_loss.name self.distill_losses[loss_name] = build_distill_loss(item_loss) def base_parameters(self): return nn.ModuleList([self.student, self.distill_losses]) @property def with_neck(self): return hasattr(self.student, 'neck') and self.student.neck is not None @property def with_shared_head(self): return hasattr(self.student, 'roi_head') and self.student.roi_head.with_shared_head @property def with_bbox(self): return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_bbox) or (hasattr(self.student, 'bbox_head') and self.student.bbox_head is not None)) @property def with_mask(self): return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_mask) or 
(hasattr(self.student, 'mask_head') and self.student.mask_head is not None)) def init_weights_teacher(self, path=None): checkpoint = load_checkpoint(self.teacher, path, map_location='cpu') def forward_train(self, img, img_metas, **kwargs): with torch.no_grad(): self.teacher.eval() fea_t = self.teacher.extract_feat(img) student_feat = self.student.extract_feat(img) student_loss = self.student.bbox_head.forward_train( student_feat, img_metas, **kwargs) for i in range(len(student_feat)): loss_name = 'loss_csd_fpn_'+str(i) student_loss[loss_name] = self.distill_losses[loss_name]( student_feat[i], fea_t[i].detach(), kwargs['gt_bboxes'], img_metas) return student_loss def simple_test(self, img, img_metas, **kwargs): return self.student.simple_test(img, img_metas, **kwargs) def aug_test(self, imgs, img_metas, **kwargs): return self.student.aug_test(imgs, img_metas, **kwargs) def extract_feat(self, imgs): return self.student.extract_feat(imgs)
true
true
1c45f6fcf56f16cac02a648a17e1f72c3d8a6b99
270
py
Python
tests/basics/bytearray_construct.py
peterson79/pycom-micropython-sigfox
3f93fc2c02567c96f18cff4af9125db8fd7a6fb4
[ "MIT" ]
37
2017-12-07T15:49:29.000Z
2022-03-16T16:01:38.000Z
tests/basics/bytearray_construct.py
peterson79/pycom-micropython-sigfox
3f93fc2c02567c96f18cff4af9125db8fd7a6fb4
[ "MIT" ]
27
2015-01-02T16:17:37.000Z
2015-09-07T19:21:26.000Z
tests/basics/bytearray_construct.py
peterson79/pycom-micropython-sigfox
3f93fc2c02567c96f18cff4af9125db8fd7a6fb4
[ "MIT" ]
22
2016-08-01T01:35:30.000Z
2022-03-22T18:12:23.000Z
# test construction of bytearray from different objects from array import array # bytes, tuple, list print(bytearray(b'123')) print(bytearray((1, 2))) print(bytearray([1, 2])) # arrays print(bytearray(array('b', [1, 2]))) print(bytearray(array('h', [0x101, 0x202])))
20.769231
55
0.7
from array import array print(bytearray(b'123')) print(bytearray((1, 2))) print(bytearray([1, 2])) print(bytearray(array('b', [1, 2]))) print(bytearray(array('h', [0x101, 0x202])))
true
true
1c45fb3f361b4037f9e9310bf53c677582ab3001
2,473
py
Python
boto/pyami/startup.py
rectalogic/boto
1ac79d0c984bfd83f26e7c3af4877a731a63ecc2
[ "MIT" ]
1
2019-06-22T23:31:13.000Z
2019-06-22T23:31:13.000Z
boto/pyami/startup.py
rectalogic/boto
1ac79d0c984bfd83f26e7c3af4877a731a63ecc2
[ "MIT" ]
null
null
null
boto/pyami/startup.py
rectalogic/boto
1ac79d0c984bfd83f26e7c3af4877a731a63ecc2
[ "MIT" ]
null
null
null
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import sys import boto from boto.utils import find_class from boto import config from boto.pyami.scriptbase import ScriptBase class Startup(ScriptBase): def run_scripts(self): scripts = config.get('Pyami', 'scripts') if scripts: for script in scripts.split(','): script = script.strip(" ") try: pos = script.rfind('.') if pos > 0: mod_name = script[0:pos] cls_name = script[pos+1:] cls = find_class(mod_name, cls_name) boto.log.info('Running Script: %s' % script) s = cls() s.main() else: boto.log.warning('Trouble parsing script: %s' % script) except Exception as e: boto.log.exception('Problem Running Script: %s. Startup process halting.' 
% script) raise e def main(self): self.run_scripts() self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id')) if __name__ == "__main__": if not config.has_section('loggers'): boto.set_file_logger('startup', '/var/log/boto.log') sys.path.append(config.get('Pyami', 'working_dir')) su = Startup() su.main()
40.540984
103
0.630004
import sys import boto from boto.utils import find_class from boto import config from boto.pyami.scriptbase import ScriptBase class Startup(ScriptBase): def run_scripts(self): scripts = config.get('Pyami', 'scripts') if scripts: for script in scripts.split(','): script = script.strip(" ") try: pos = script.rfind('.') if pos > 0: mod_name = script[0:pos] cls_name = script[pos+1:] cls = find_class(mod_name, cls_name) boto.log.info('Running Script: %s' % script) s = cls() s.main() else: boto.log.warning('Trouble parsing script: %s' % script) except Exception as e: boto.log.exception('Problem Running Script: %s. Startup process halting.' % script) raise e def main(self): self.run_scripts() self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id')) if __name__ == "__main__": if not config.has_section('loggers'): boto.set_file_logger('startup', '/var/log/boto.log') sys.path.append(config.get('Pyami', 'working_dir')) su = Startup() su.main()
true
true
1c45fb42c9cea7abcaef7ad6b5250326ab3d502e
73,700
py
Python
ambassador/tests/t_tls.py
jhsiaomei/ambassador
c2726366612e31b74c177329f51265b5ad0f8df7
[ "Apache-2.0" ]
null
null
null
ambassador/tests/t_tls.py
jhsiaomei/ambassador
c2726366612e31b74c177329f51265b5ad0f8df7
[ "Apache-2.0" ]
null
null
null
ambassador/tests/t_tls.py
jhsiaomei/ambassador
c2726366612e31b74c177329f51265b5ad0f8df7
[ "Apache-2.0" ]
null
null
null
from kat.harness import Query from abstract_tests import AmbassadorTest, HTTP, ServiceType class TLSContextsTest(AmbassadorTest): """ This test makes sure that TLS is not turned on when it's not intended to. For example, when an 'upstream' TLS configuration is passed, the port is not supposed to switch to 443 """ def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 metadata: name: test-tlscontexts-secret labels: kat-ambassador-id: tlscontextstest data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1RENDQXFDZ0F3SUJBZ0lKQUowWDU3ZXlwQk5UTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4R3pBWkJnTlZCQU1NRW0xaGMzUmxjaTVrCllYUmhkMmx5WlM1cGJ6QWVGdzB4T1RBeE1UQXhPVEF6TXpCYUZ3MHlOREF4TURreE9UQXpNekJhTUhFeEN6QUoKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRSwpEQWhFWVhSaGQybHlaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEd6QVpCZ05WQkFNTUVtMWhjM1JsCmNpNWtZWFJoZDJseVpTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPdlEKVjVad1NmcmQ1Vndtelo5SmNoOTdyUW40OXA2b1FiNkVIWjF5T2EyZXZBNzE2NWpkMHFqS1BPMlgyRk80MVg4QgpwQWFLZExnMmltaC9wL2NXN2JncjNHNnRHVEZVMVZHanllTE1EV0Q1MGV2TTYydnpYOFRuYVV6ZFRHTjFOdTM2CnJaM2JnK0VLcjhFYjI1b2RabEpyMm1mNktSeDdTcjZzT1N4NlE1VHhSb3NycmZ0d0tjejI5cHZlMGQ4b0NiZGkKRFJPVlZjNXpBaW0zc2Nmd3VwRUJrQzYxdlpKMzhmaXYwRENYOVpna3BMdEZKUTllTEVQSEdKUGp5ZmV3alNTeQovbk52L21Sc2J6aUNtQ3R3Z3BmbFRtODljK3EzSWhvbUE1YXhZQVFjQ0NqOXBvNUhVZHJtSUJKR0xBTVZ5OWJ5CkZnZE50aFdBeHZCNHZmQXl4OXNDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkdUOVAvOHBQeGI3UVJVeFcvV2gKaXpkMnNnbEtNQjhHQTFVZEl3UVlNQmFBRkdUOVAvOHBQeGI3UVJVeFcvV2hpemQyc2dsS01BOEdBMVVkRXdFQgovd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS3NWT2Fyc01aSXhLOUpLUzBHVHNnRXNjYThqCllhTDg1YmFsbndBbnBxMllSMGNIMlhvd2dLYjNyM3VmbVRCNERzWS9RMGllaENKeTMzOUJyNjVQMVBKMGgvemYKZEZOcnZKNGlvWDVMWnc5YkowQVFORCtZUTBFK010dFppbE
9DbHNPOVBCdm1tUEp1dWFlYVdvS2pWZnNOL1RjMAoycUxVM1pVMHo5bmhYeDZlOWJxYUZLSU1jYnFiVk9nS2p3V0ZpbDlkRG4vQ29KbGFUUzRJWjlOaHFjUzhYMXd0ClQybWQvSUtaaEtKc3A3VlBGeDU5ZWhuZ0VPakZocGhzd20xdDhnQWVxL1A3SkhaUXlBUGZYbDNyZDFSQVJuRVIKQUpmVUxET2tzWFNFb2RTZittR0NrVWh1b2QvaDhMTUdXTFh6Q2d0SHBKMndaVHA5a1ZWVWtKdkpqSVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K kind: Secret type: Opaque """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module name: tls ambassador_id: {self.ambassador_id} config: upstream: enabled: True secret: test-tlscontexts-secret """) yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.target.path.k8s} prefix: /{self.name}/ service: {self.target.path.fqdn} """) def scheme(self) -> str: return "https" def queries(self): yield Query(self.url(self.name + "/"), error=['connection refused', 'connection reset by peer', 'EOF', 'request canceled']) def requirements(self): yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("http://")) class ClientCertificateAuthentication(AmbassadorTest): presto_crt = """ -----BEGIN CERTIFICATE----- MIIDYTCCAkkCCQCrK74a3GFhijANBgkqhkiG9w0BAQsFADBxMQswCQYDVQQGEwJV UzELMAkGA1UECAwCTUExDzANBgNVBAcMBkJvc3RvbjERMA8GA1UECgwIRGF0YXdp cmUxFDASBgNVBAsMC0VuZ2luZWVyaW5nMRswGQYDVQQDDBJtYXN0ZXIuZGF0YXdp cmUuaW8wIBcNMTkwMTEwMTkxOTUyWhgPMjExODEyMTcxOTE5NTJaMHIxCzAJBgNV BAYTAklOMQswCQYDVQQIDAJLQTESMBAGA1UEBwwJQmFuZ2Fsb3JlMQ8wDQYDVQQK DAZQcmVzdG8xFDASBgNVBAsMC0VuZ2luZWVyaW5nMRswGQYDVQQDDBJwcmVzdG8u ZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvPcFp hw5Ja67z23L4YCYTgNdw4eVh7EHyzOpmf3VGhvx/UtNMVOH7Dcf+I7QEyxtQeBiZ HOcThgr/k/wrAbMjdThRS8yJxRZgj79Li92pKkJbhLGsBeTuw8lBhtwyn85vEZrt TOWEjlXHHLlz1OHiSAfYChIGjenPu5sT++O1AAs15b/0STBxkrZHGVimCU6qEWqB PYVcGYqXdb90mbsuY5GAdAzUBCGQH/RLZAl8ledT+uzkcgHcF30gUT5Ik5Ks4l/V t+C6I52Y0S4aCkT38XMYKMiBh7XzpjJUnR0pW5TYS37wq6nnVFsNReaMKmbOWp1X 5wEjoRJqDrHtVvjDAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAI3LR5fS6D6yFa6b 
yl6+U/i44R3VYJP1rkee0s4C4WbyXHURTqQ/0z9wLU+0Hk57HI+7f5HO/Sr0q3B3 wuZih+TUbbsx5jZW5e++FKydFWpx7KY4MUJmePydEMoUaSQjHWnlAuv9PGp5ZZ30 t0lP/mVGNAeiXsILV8gRHnP6aV5XywK8c+828BQDRfizJ+uKYvnAJmqpn4aOOJh9 csjrK52+RNebMT0VxZF4JYGd0k00au9CaciWpPk69C+A/7K/xtV4ZFtddVP9SldF ahmIu2g3fI5G+/2Oz8J+qX2B+QqT21/pOPKnMQU54BQ6bmI3fBM9B+2zm92FfgYH 9wgA5+Y= -----END CERTIFICATE----- """ presto_key = """ -----BEGIN RSA PRIVATE KEY----- MIIEoQIBAAKCAQEArz3BaYcOSWuu89ty+GAmE4DXcOHlYexB8szqZn91Rob8f1LT TFTh+w3H/iO0BMsbUHgYmRznE4YK/5P8KwGzI3U4UUvMicUWYI+/S4vdqSpCW4Sx rAXk7sPJQYbcMp/ObxGa7UzlhI5Vxxy5c9Th4kgH2AoSBo3pz7ubE/vjtQALNeW/ 9EkwcZK2RxlYpglOqhFqgT2FXBmKl3W/dJm7LmORgHQM1AQhkB/0S2QJfJXnU/rs 5HIB3Bd9IFE+SJOSrOJf1bfguiOdmNEuGgpE9/FzGCjIgYe186YyVJ0dKVuU2Et+ 8Kup51RbDUXmjCpmzlqdV+cBI6ESag6x7Vb4wwIDAQABAoIBAHfXwPS9Mw0NAoms kzS+9Gs0GqINKoTMQNGeR9Mu6XIBEJ62cuBp0F2TsCjiG9OHXzep2hCkDndwnQbq GnMC55KhMJGQR+IUEdiZldZBYaa1ysmxtpwRL94FsRYJ9377gP6+SHhutSvw90KD J2TKumu4nPym7mrjFHpHL6f8BF6b9dJftE2o27TX04+39kPiX4d+4CLfG7YFteYR 98qYHwAk58+s3jJxk7gaDehb0PvOIma02eLF7dNA7h0BtB2h2rfPLNlgKv2MN7k3 NxRHwXEzSCfK8rL8yxQLo4gOy3up+LU7LRERBIkpOyS5tkKcIGoG1w5zEB4sqJZC Me2ZbUkCgYEA4RGHtfYkecTIBwSCgdCqJYa1zEr35xbgqxOWF7DfjjMwfxeitdh+ U487SpDpoH68Rl/pnqQcHToQWRfLGXv0NZxsQDH5UulK2dLy2JfQSlFMWc0rQ210 v8F35GXohB3vi4Tfrl8wrkEBbCBoZDmp7MPZEGVGb0KVl+gU2u19CwUCgYEAx1Mt w6M8+bj3ZQ9Va9tcHSk9IVRKx0fklWY0/cmoGw5P2q/Yudd3CGupINGEA/lHqqW3 boxfdneYijOmTQO9/od3/NQRDdTrCRKOautts5zeJw7fUvls5/Iip5ZryR5mYqEz Q/yMffzZPYVPXR0E/HEnCjf8Vs+0dDa2QwAhDycCf0j4ZgeYxjq0kiW0UJvGC2Qf SNHzfGxv/md48jC8J77y2cZa42YRyuNMjOygDx75+BDZB+VnT7YqHSLFlBOvHH5F ONOXYD6BZMM6oYGXtvBha1+yJVS3KCMDltt2LuymyAN0ERF3y1CzwsJLv4y/JVie JsIqE6v+6oFVvW09kk0CgYEAuazRL7ILJfDYfAqJnxxLNVrp9/cmZXaiB02bRWIp N3Lgji1KbOu6lVx8wvaIzI7U5LDUK6WVc6y6qtqsKoe237hf3GPLsx/JBb2EbzL6 ENuq0aV4AToZ6gLTp1tm8oVgCLZzI/zI/r+fukBJispyj5n0LP+0D0YSqkMhC06+ fPcCgYB85vDLHorvbb8CYcIOvJxogMjXVasOfSLqtCkzICg4i6qCmLkXbs0qmDIz bIpIFzUdXu3tu+gPV6ab9dPmpj1M77yu7+QLL7zRy/1/EJaY/tFjWzcuF5tP7jKT 
UZCMWuBXFwTbeSQHESs5IWpSDxBGJbSNFmCeyo52Dw/fSYxUEg== -----END RSA PRIVATE KEY----- """ ca_cert = """ -----BEGIN CERTIFICATE----- MIIDuDCCAqCgAwIBAgIJAJ0X57eypBNTMA0GCSqGSIb3DQEBCwUAMHExCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJNQTEPMA0GA1UEBwwGQm9zdG9uMREwDwYDVQQKDAhE YXRhd2lyZTEUMBIGA1UECwwLRW5naW5lZXJpbmcxGzAZBgNVBAMMEm1hc3Rlci5k YXRhd2lyZS5pbzAeFw0xOTAxMTAxOTAzMzBaFw0yNDAxMDkxOTAzMzBaMHExCzAJ BgNVBAYTAlVTMQswCQYDVQQIDAJNQTEPMA0GA1UEBwwGQm9zdG9uMREwDwYDVQQK DAhEYXRhd2lyZTEUMBIGA1UECwwLRW5naW5lZXJpbmcxGzAZBgNVBAMMEm1hc3Rl ci5kYXRhd2lyZS5pbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOvQ V5ZwSfrd5VwmzZ9Jch97rQn49p6oQb6EHZ1yOa2evA7165jd0qjKPO2X2FO41X8B pAaKdLg2imh/p/cW7bgr3G6tGTFU1VGjyeLMDWD50evM62vzX8TnaUzdTGN1Nu36 rZ3bg+EKr8Eb25odZlJr2mf6KRx7Sr6sOSx6Q5TxRosrrftwKcz29pve0d8oCbdi DROVVc5zAim3scfwupEBkC61vZJ38fiv0DCX9ZgkpLtFJQ9eLEPHGJPjyfewjSSy /nNv/mRsbziCmCtwgpflTm89c+q3IhomA5axYAQcCCj9po5HUdrmIBJGLAMVy9by FgdNthWAxvB4vfAyx9sCAwEAAaNTMFEwHQYDVR0OBBYEFGT9P/8pPxb7QRUxW/Wh izd2sglKMB8GA1UdIwQYMBaAFGT9P/8pPxb7QRUxW/Whizd2sglKMA8GA1UdEwEB /wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAKsVOarsMZIxK9JKS0GTsgEsca8j YaL85balnwAnpq2YR0cH2XowgKb3r3ufmTB4DsY/Q0iehCJy339Br65P1PJ0h/zf dFNrvJ4ioX5LZw9bJ0AQND+YQ0E+MttZilOClsO9PBvmmPJuuaeaWoKjVfsN/Tc0 2qLU3ZU0z9nhXx6e9bqaFKIMcbqbVOgKjwWFil9dDn/CoJlaTS4IZ9NhqcS8X1wt T2md/IKZhKJsp7VPFx59ehngEOjFhphswm1t8gAeq/P7JHZQyAPfXl3rd1RARnER AJfULDOksXSEodSf+mGCkUhuod/h8LMGWLXzCgtHpJ2wZTp9kVVUkJvJjIU= -----END CERTIFICATE----- """ def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 metadata: name: test-clientcert-client-secret labels: kat-ambassador-id: clientcertificateauthentication data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1RENDQXFDZ0F3SUJBZ0lKQUowWDU3ZXlwQk5UTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4R3pBWkJnTlZCQU1NRW0xaGMzUmxjaTVrCllYUmhkMmx5WlM1cGJ6QWVGdzB4T1RBeE1UQXhPVEF6TXpCYUZ3MHlOREF4TURreE9UQXpNekJhTUhFeEN6QUoKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRSwpEQWhFWVhSaGQybHlaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEd6QVpCZ05WQkFNTUVtMWhjM1JsCmNpNWtZWFJoZDJseVpTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPdlEKVjVad1NmcmQ1Vndtelo5SmNoOTdyUW40OXA2b1FiNkVIWjF5T2EyZXZBNzE2NWpkMHFqS1BPMlgyRk80MVg4QgpwQWFLZExnMmltaC9wL2NXN2JncjNHNnRHVEZVMVZHanllTE1EV0Q1MGV2TTYydnpYOFRuYVV6ZFRHTjFOdTM2CnJaM2JnK0VLcjhFYjI1b2RabEpyMm1mNktSeDdTcjZzT1N4NlE1VHhSb3NycmZ0d0tjejI5cHZlMGQ4b0NiZGkKRFJPVlZjNXpBaW0zc2Nmd3VwRUJrQzYxdlpKMzhmaXYwRENYOVpna3BMdEZKUTllTEVQSEdKUGp5ZmV3alNTeQovbk52L21Sc2J6aUNtQ3R3Z3BmbFRtODljK3EzSWhvbUE1YXhZQVFjQ0NqOXBvNUhVZHJtSUJKR0xBTVZ5OWJ5CkZnZE50aFdBeHZCNHZmQXl4OXNDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkdUOVAvOHBQeGI3UVJVeFcvV2gKaXpkMnNnbEtNQjhHQTFVZEl3UVlNQmFBRkdUOVAvOHBQeGI3UVJVeFcvV2hpemQyc2dsS01BOEdBMVVkRXdFQgovd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS3NWT2Fyc01aSXhLOUpLUzBHVHNnRXNjYThqCllhTDg1YmFsbndBbnBxMllSMGNIMlhvd2dLYjNyM3VmbVRCNERzWS9RMGllaENKeTMzOUJyNjVQMVBKMGgvemYKZEZOcnZKNGlvWDVMWnc5YkowQVFORCtZUTBFK010dFppbE9DbHNPOVBCdm1tUEp1dWFlYVdvS2pWZnNOL1RjMAoycUxVM1pVMHo5bmhYeDZlOWJxYUZLSU1jYnFiVk9nS2p3V0ZpbDlkRG4vQ29KbGFUUzRJWjlOaHFjUzhYMXd0ClQybWQvSUtaaEtKc3A3VlBGeDU5ZWhuZ0VPakZocGhzd20xdDhnQWVxL1A3SkhaUXlBUGZYbDNyZDFSQVJuRVIKQUpmVUxET2tzWFNFb2RTZittR0NrVWh1b2QvaDhMTUdXTFh6Q2d0SHBKMndaVHA5a1ZWVWtKdkpqSVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K kind: Secret type: Opaque --- apiVersion: v1 kind: Secret metadata: name: test-clientcert-server-secret labels: kat-ambassador-id: clientcertificateauthentication type: kubernetes.io/tls data: 
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaekNDQWs4Q0NRQ3JLNzRhM0dGaGlUQU5CZ2txaGtpRzl3MEJBUXNGQURCeE1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1RVRXhEekFOQmdOVkJBY01Ca0p2YzNSdmJqRVJNQThHQTFVRUNnd0lSR0YwWVhkcApjbVV4RkRBU0JnTlZCQXNNQzBWdVoybHVaV1Z5YVc1bk1Sc3dHUVlEVlFRRERCSnRZWE4wWlhJdVpHRjBZWGRwCmNtVXVhVzh3SGhjTk1Ua3dNVEV3TVRrd056TTRXaGNOTWprd01UQTNNVGt3TnpNNFdqQjZNUXN3Q1FZRFZRUUcKRXdKSlRqRUxNQWtHQTFVRUNBd0NTMEV4RWpBUUJnTlZCQWNNQ1VKaGJtZGhiRzl5WlRFVE1CRUdBMVVFQ2d3SwpRVzFpWVhOellXUnZjakVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEh6QWRCZ05WQkFNTUZtRnRZbUZ6CmMyRmtiM0l1WlhoaGJYQnNaUzVqYjIwd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFDN1liY3o5SkZOSHVYY3pvZERrTURvUXd0M1pmQnpjaElwTFlkeHNDZnB1UUYybGNmOGxXMEJKNnZlNU0xTAovMjNZalFYeEFsV25VZ3FZdFlEL1hiZGh3RCtyRWx3RXZWUzR1US9IT2EyUTUwVkF6SXNYa0lxWm00dVA1QzNECk8rQ0NncXJ3UUgzYS8vdlBERldYWkUyeTJvcUdZdE1Xd20zVXQrYnFWSFEzOThqcTNoaGt3MmNXL0pLTjJkR2UKRjk0OWxJWG15NHMrbGE3b21RWldWY0JFcWdQVzJDL1VrZktSbVdsVkRwK0duSk8vZHFobDlMN3d2a2hhc2JETAphbVkweXdiOG9LSjFRdmlvV1JxcjhZZnQ5NzVwaGgzazRlRVdMMUNFTmxFK09vUWNTNVRPUEdndko3WlMyaU43CllVTDRBK0gydCt1WWdUdnFSYVNqcTdnckFnTUJBQUV3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJURGJ4MzkKUGpoT2JpVW1Rdm9vbVhOVjJ1TG1FZkxJcGlKQUhWOTM0VTlmMnhVUS93eExkcElhVXM0WTlRSzhOR2h2U3dSSAp4Y2w4R2hGYzBXRDRoNEJTdmNhdUdVS21LRzh5ZVFhdGhGVjBzcGFHYjUvaFBqUVdDWnNYK3crbjU4WDROOHBrCmx5YkE4akZGdUZlb3R3Z1l6UUhzQUppU29DbW9OQ0ZkaE4xT05FS1FMY1gxT2NRSUFUd3JVYzRBRkw2Y0hXZ1MKb1FOc3BTMlZIbENsVkpVN0E3Mkh4R3E5RFVJOWlaMmYxVnc1Rmpod0dxalBQMDJVZms1Tk9RNFgzNWlrcjlDcApyQWtJSnh1NkZPUUgwbDBmZ3VNUDlsUFhJZndlMUowQnNLZHRtd2wvcHp0TVV5dW5TbURVWEgyR1l5YmdQTlQyCnNMVFF1RFZaR0xmbFJUdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdTJHM00vU1JUUjdsM002SFE1REE2RU1MZDJYd2MzSVNLUzJIY2JBbjZia0JkcFhICi9KVnRBU2VyM3VUTlMvOXQySTBGOFFKVnAxSUttTFdBLzEyM1ljQS9xeEpjQkwxVXVMa1B4em10a09kRlFNeUwKRjVDS21adUxqK1F0d3p2Z2dvS3E4RUI5MnYvN3p3eFZsMlJOc3RxS2htTFRGc0p0MUxmbTZsUjBOL2ZJNnQ0WQpaTU5uRnZ5U2pkblJuaGZlUFpTRjVzdUxQcFd1NkprR1ZsWEFSS29EMXRndjFKSHlrWmxwVlE2ZmhweVR2M2FvClpmUys4TDVJV3JHd3kycG1OTXNHL0tDaWRVTDRxRmthcS9HSDdmZSthWVlkNU9IaEZpOVFoRFpSUGpxRUhFdVUKemp4b0x5ZTJVdG9qZTJGQytBUGg5cmZybUlFNzZrV2tvNnU0S3dJREFRQUJBb0lCQVFDbmZrZjViQko1Z2pYcgpzcnliKzRkRDFiSXBMdmpJNk4wczY2S1hUK1BOZW03QlprOVdDdWRkMGUxQ2x2aWZoeG5VS1BKM3BTT1ZKYk9OCkh5aklteWV4ZTl3dGVZTEJSYysyTXMzVXdrelFLcm52bXlaMWtPRWpQek40RW5tSmV6dEt6YXdvaHkwNGxmcXEKNzVhT2RiMHlNMEVCc05LSkZKQ0NSVVJtajhrMndJQXIwbHFhV0ZNcGlYT3FzTXBvWTZMY3plaGlMZHU0bUFaSQpRRHhCM3dLVGpmdGNIdzcxTmFKZlg5V2t2OFI4ZWlqeWpNOUl2Y1cwZmRQem9YVTBPZEFTa09ZRlFIZHlCUFNiCjllNWhDSGFJczZia1hBOEs4YmZRazBSL0d6STcyVXArd0JrbnJnTlhZTXFudHJSa0ljNURER1g0b3VOc2lqUkoKSWtrWER2TjVBb0dCQU8veFQrNTYyQ2hwc3R2NUpvMi9ycFdGb05tZ3ZJT0RMRGxiamhHZEpqKytwNk1BdjFQWgo2d042WnozMmppUG1OYzdCK2hrQm40RFQvVkFpU3NLRG1SK09tUkg1TVNzQXh6aWRxU3lNcldxdG1lMDNBVzd6Cklja0FNTGdwWHhDdW1HMzRCM2Jxb3VUdGVRdm5WcmRlR2hvdUJ5OUJSMVpXbnRtWHVscVhyNUFmQW9HQkFNZnIKN29NVGwzdUVVeml5a0IzYmkxb0RYdUNjN01Qc3h0c1IwdElqZXc3RStwTGoyaUxXZUZuMGVhdnJYaHQ1ODRJbwpDZG90a1ZMMHhrZ1g3M2ZremxEd1hobTJVTXBaQmxzSzBnR09SaUYzd0ZMU0hJNmxRUmJkaXRIb0JqcDRGTEZzCitlanZKUDZ1ZitBekZ5cjBLTnc3TnpyaCthbFhFQ09RS2NqUXJlWjFBb0dBQXRLZzhScEszcmJYbnRUZ2lqeGUKRG01REJTeHA2MVlvdUFnR3ROaFhjZHFKV0ZhUzZhYWZxQ3ZSZVI0a2IvR3VZbDlQMU9sNitlWUVqZVBKWTE1dQo5N3NTdSs1bGtLN3lxUXpaeDZka0J1UkI4bE42VmRiUVorL3pvc2NCMGsxcmg2ZXFWdEROMThtZmFlOXZ5cnAxCnJpY3FlSGpaSVAvbDRJTnpjc3RrQ2xzQ2dZQmh5TVZkZVZ5emZuS1NIY3lkdmY5MzVJUW9pcmpIeiswbnc1MEIKU1hkc0x1NThvRlBXakY1TGFXZUZybGJXUzV6T1FiVW44UGZPd29pbFJJZk5kYTF3SzFGcmRDQXFDTWN5Q3FYVApPdnFVYmhVMHJTNW9tdTJ1T0dnbzZUcjZxRGMrM1JXVFdEMFpFTkxkSDBBcXMwZTFDSVdvR0ZWYi9ZaVlUSEFUCmwvWW03UUtCZ1FEcFYvSjRMakY5VzBlUlNXenFBaDN1TStCdzNN
N2NEMUxnUlZ6ZWxGS2w2ZzRBMWNvdU8wbHAKalpkMkVMZDlzTHhBVENVeFhQZ0dDTjY0RVNZSi92ZUozUmJzMTMrU2xqdjRleTVKck1ieEhNRC9CU1ovY2VjaAp4aFNWNkJsMHVKb2tlMTRPMEJ3OHJzSUlxZTVZSUxqSlMwL2E2eTllSlJtaGZJVG9PZU5PTUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module ambassador_id: {self.ambassador_id} name: tls config: server: enabled: True secret: test-clientcert-server-secret client: enabled: True secret: test-clientcert-client-secret cert_required: True """) yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.target.path.k8s} prefix: /{self.name}/ service: {self.target.path.fqdn} """) def scheme(self) -> str: return "https" def queries(self): yield Query(self.url(self.name + "/"), insecure=True, client_crt=self.presto_crt, client_key=self.presto_key, client_cert_required=True, ca_cert=self.ca_cert) yield Query(self.url(self.name + "/"), insecure=True, error="handshake failure") def requirements(self): for r in super().requirements(): query = r[1] query.insecure = True query.client_cert = self.presto_crt query.client_key = self.presto_key query.client_cert_required = True query.ca_cert = self.ca_cert yield (r[0], query) class TLSOriginationSecret(AmbassadorTest): def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 kind: Secret metadata: name: test-origination-secret labels: kat-ambassador-id: tlsoriginationsecret type: kubernetes.io/tls data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFv
R0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module ambassador_id: {self.ambassador_id} name: tls config: upstream: secret: test-origination-secret upstream-files: cert_chain_file: /tmp/ambassador/snapshots/default/secrets-decoded/test-origination-secret/F94E4DCF30ABC50DEF240AA8024599B67CC03991.crt private_key_file: /tmp/ambassador/snapshots/default/secrets-decoded/test-origination-secret/F94E4DCF30ABC50DEF240AA8024599B67CC03991.key """) yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.target.path.k8s} prefix: /{self.name}/ service: {self.target.path.fqdn} tls: upstream """) yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.target.path.k8s}-files prefix: /{self.name}-files/ service: {self.target.path.fqdn} tls: upstream-files """) def queries(self): yield Query(self.url(self.name + "/")) yield Query(self.url(self.name + "-files/")) def check(self): for r in self.results: assert r.backend.request.tls.enabled class TLS(AmbassadorTest): target: ServiceType def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 kind: Secret metadata: name: test-tls-secret labels: kat-ambassador-id: tls type: kubernetes.io/tls data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFv
R0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K --- apiVersion: v1 kind: Secret metadata: name: ambassador-certs labels: kat-ambassador-id: tls type: kubernetes.io/tls data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04
QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRz
J1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K """ def config(self): # Use self here, not self.target, because we want the TLS module to # be annotated on the Ambassador itself. yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module name: tls ambassador_id: {self.ambassador_id} config: server: enabled: True secret: test-tls-secret """) # Use self.target _here_, because we want the httpbin mapping to # be annotated on the service, not the Ambassador. Also, you don't # need to include the ambassador_id unless you need some special # ambassador_id that isn't something that kat already knows about. # # If the test were more complex, we'd probably need to do some sort # of mangling for the mapping name and prefix. For this simple test, # it's not necessary. 
yield self.target, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: tls_target_mapping prefix: /tls-target/ service: {self.target.path.fqdn} """) def scheme(self) -> str: return "https" def queries(self): yield Query(self.url("tls-target/"), insecure=True) class TLSInvalidSecret(AmbassadorTest): target: ServiceType def init(self): self.target = HTTP() def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module name: tls ambassador_id: {self.ambassador_id} config: server: enabled: True secret: test-certs-secret-invalid missing-secret-key: cert_chain_file: /nonesuch bad-path-info: cert_chain_file: /nonesuch private_key_file: /nonesuch validation-without-termination: enabled: True secret: test-certs-secret-invalid ca_secret: ambassador-certs """) yield self.target, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: tls_target_mapping prefix: /tls-target/ service: {self.target.path.fqdn} """) def scheme(self) -> str: return "http" def queries(self): yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), phase=2) def check(self): errors = self.results[0].backend.response expected = set({ "TLSContext server found no certificate in secret test-certs-secret-invalid in namespace default, ignoring...", "TLSContext bad-path-info found no cert_chain_file '/nonesuch'", "TLSContext bad-path-info found no private_key_file '/nonesuch'", "TLSContext validation-without-termination found no certificate in secret test-certs-secret-invalid in namespace default, ignoring...", "TLSContext missing-secret-key: 'cert_chain_file' requires 'private_key_file' as well", }) current = set({}) for errsvc, errtext in errors: current.add(errtext) diff = expected - current assert len(diff) == 0, f'expected {len(expected)} errors, got {len(errors)}: Missing {diff}' class TLSContextTest(AmbassadorTest): # debug = True def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- 
apiVersion: v1 kind: Namespace metadata: name: secret-namespace --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFv
R0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: test-tlscontext-secret-0 labels: kat-ambassador-id: tlscontext type: kubernetes.io/tls --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q
1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o
2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: test-tlscontext-secret-1 namespace: secret-namespace labels: kat-ambassador-id: tlscontext type: kubernetes.io/tls --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUlIWTY3cFNoZ3NyTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB5TUI0WERURTRNVEV3TVRFME1EUXhObG9YCkRUSTRNVEF5T1RFME1EUXhObG93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRJd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURjQThZdGgvUFdhT0dTCm9ObXZFSFoyNGpRN1BLTitENG93TEhXZWl1UmRtaEEwWU92VTN3cUczVnFZNFpwbFpBVjBQS2xELysyWlNGMTQKejh3MWVGNFFUelphWXh3eTkrd2ZITmtUREVwTWpQOEpNMk9FYnlrVVJ4VVJ2VzQrN0QzMEUyRXo1T1BseG1jMApNWU0vL0pINUVEUWhjaURybFlxZTFTUk1SQUxaZVZta2FBeXU2TkhKVEJ1ajBTSVB1ZExUY2grOTBxK3Jkd255CmZrVDF4M09UYW5iV2pub21FSmU3TXZ5NG12dnFxSUh1NDhTOUM4WmQxQkdWUGJ1OFYvVURyU1dROXpZQ1g0U0cKT2FzbDhDMFhtSDZrZW1oUERsRC9UdjB4dnlINXE1TVVjSGk0bUp0Titnem9iNTREd3pWR0VqZWY1TGVTMVY1RgowVEFQMGQrWEFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFmCkJnTlZIU01FR0RBV2dCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBbUFLYkNsdUhFZS9JRmJ1QWJneDBNenV6aTkwd2xtQVBiOGdtTwpxdmJwMjl1T1ZzVlNtUUFkZFBuZEZhTVhWcDFaaG1UVjVDU1F0ZFgyQ1ZNVyswVzQ3Qy9DT0Jkb1NFUTl5akJmCmlGRGNseG04QU4yUG1hR1FhK3hvT1hnWkxYZXJDaE5LV0JTWlIrWktYTEpTTTlVYUVTbEhmNXVuQkxF
cENqK2oKZEJpSXFGY2E3eElGUGtyKzBSRW9BVmMveFBubnNhS2pMMlV5Z0dqUWZGTnhjT042Y3VjYjZMS0pYT1pFSVRiNQpINjhKdWFSQ0tyZWZZK0l5aFFWVk5taWk3dE1wY1UyS2pXNXBrVktxVTNkS0l0RXEyVmtTZHpNVUtqTnhZd3FGCll6YnozNFQ1MENXbm9HbU5SQVdKc0xlVmlPWVUyNmR3YkFXZDlVYitWMDFRam43OAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktrd2dnU2xBZ0VBQW9JQkFRRGNBOFl0aC9QV2FPR1MKb05tdkVIWjI0alE3UEtOK0Q0b3dMSFdlaXVSZG1oQTBZT3ZVM3dxRzNWcVk0WnBsWkFWMFBLbEQvKzJaU0YxNAp6OHcxZUY0UVR6WmFZeHd5OSt3ZkhOa1RERXBNalA4Sk0yT0VieWtVUnhVUnZXNCs3RDMwRTJFejVPUGx4bWMwCk1ZTS8vSkg1RURRaGNpRHJsWXFlMVNSTVJBTFplVm1rYUF5dTZOSEpUQnVqMFNJUHVkTFRjaCs5MHErcmR3bnkKZmtUMXgzT1RhbmJXam5vbUVKZTdNdnk0bXZ2cXFJSHU0OFM5QzhaZDFCR1ZQYnU4Vi9VRHJTV1E5ellDWDRTRwpPYXNsOEMwWG1INmtlbWhQRGxEL1R2MHh2eUg1cTVNVWNIaTRtSnROK2d6b2I1NER3elZHRWplZjVMZVMxVjVGCjBUQVAwZCtYQWdNQkFBRUNnZ0VCQUk2U3I0anYwZForanJhN0gzVnZ3S1RYZnl0bjV6YVlrVjhZWUh3RjIyakEKbm9HaTBSQllIUFU2V2l3NS9oaDRFWVM2anFHdkptUXZYY3NkTldMdEJsK2hSVUtiZVRtYUtWd2NFSnRrV24xeQozUTQwUytnVk5OU2NINDRvYUZuRU0zMklWWFFRZnBKMjJJZ2RFY1dVUVcvWnpUNWpPK3dPTXc4c1plSTZMSEtLCkdoOENsVDkrRGUvdXFqbjNCRnQwelZ3cnFLbllKSU1DSWFrb2lDRmtIcGhVTURFNVkyU1NLaGFGWndxMWtLd0sKdHFvWFpKQnlzYXhnUTFRa21mS1RnRkx5WlpXT01mRzVzb1VrU1RTeURFRzFsYnVYcHpUbTlVSTlKU2lsK01yaAp1LzVTeXBLOHBCSHhBdFg5VXdiTjFiRGw3Sng1SWJyMnNoM0F1UDF4OUpFQ2dZRUE4dGNTM09URXNOUFpQZlptCk9jaUduOW9STTdHVmVGdjMrL05iL3JodHp1L1RQUWJBSzhWZ3FrS0dPazNGN1krY2txS1NTWjFnUkF2SHBsZEIKaTY0Y0daT1dpK01jMWZVcEdVV2sxdnZXbG1nTUlQVjVtbFpvOHowMlNTdXhLZTI1Y2VNb09oenFlay9vRmFtdgoyTmxFeTh0dEhOMUxMS3grZllhMkpGcWVycThDZ1lFQTUvQUxHSXVrU3J0K0dkektJLzV5cjdSREpTVzIzUTJ4CkM5ZklUTUFSL1Q4dzNsWGhyUnRXcmlHL3l0QkVPNXdTMVIwdDkydW1nVkhIRTA5eFFXbzZ0Tm16QVBNb1RSekMKd08yYnJqQktBdUJkQ0RISjZsMlFnOEhPQWovUncrK2x4bEN0VEI2YS8xWEZIZnNHUGhqMEQrWlJiWVZzaE00UgpnSVVmdmpmQ1Y1a0NnWUVBMzdzL2FieHJhdThEaTQ3a0NBQ3o1N3FsZHBiNk92V2d0OFF5MGE5aG0vSmhFQ3lVCkNML0VtNWpHeWhpMWJuV05yNXVRWTdwVzR0cG5pdDJCU2d1VFlBMFYrck8zOFhmNThZcTBvRTFPR3l5cFlBUkoKa09SanRSYUVXVTJqNEJsaG
JZZjNtL0xnSk9oUnp3T1RPNXFSUTZHY1dhZVlod1ExVmJrelByTXUxNGtDZ1lCbwp4dEhjWnNqelVidm5wd3hTTWxKUStaZ1RvZlAzN0lWOG1pQk1POEJrclRWQVczKzFtZElRbkFKdWRxTThZb2RICmF3VW03cVNyYXV3SjF5dU1wNWFadUhiYkNQMjl5QzVheFh3OHRtZlk0TTVtTTBmSjdqYW9ydGFId1pqYmNObHMKdTJsdUo2MVJoOGVpZ1pJU1gyZHgvMVB0ckFhWUFCZDcvYWVYWU0wVWtRS0JnUUNVbkFIdmRQUGhIVnJDWU1rTgpOOFBEK0t0YmhPRks2S3MvdlgyUkcyRnFmQkJPQWV3bEo1d0xWeFBLT1RpdytKS2FSeHhYMkcvREZVNzduOEQvCkR5V2RjM2ZCQWQ0a1lJamZVaGRGa1hHNEFMUDZBNVFIZVN4NzNScTFLNWxMVWhPbEZqc3VPZ0NKS28wVlFmRC8KT05paDB6SzN5Wmc3aDVQamZ1TUdGb09OQWc9PQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== kind: Secret metadata: name: test-tlscontext-secret-2 labels: kat-ambassador-id: tlscontext type: kubernetes.io/tls """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.name}-same-prefix-1 prefix: /tls-context-same/ service: http://{self.target.path.fqdn} host: tls-context-host-1 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-1 hosts: - tls-context-host-1 secret: test-tlscontext-secret-1.secret-namespace min_tls_version: v1.0 max_tls_version: v1.3 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: Mapping name: {self.name}-same-prefix-2 prefix: /tls-context-same/ service: http://{self.target.path.fqdn} host: tls-context-host-2 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-2 hosts: - tls-context-host-2 secret: test-tlscontext-secret-2 alpn_protocols: h2,http/1.1 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: Module name: tls config: server: enabled: True secret: test-tlscontext-secret-0 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: Mapping name: {self.name}-other-mapping prefix: /{self.name}/ service: https://{self.target.path.fqdn} """) # Ambassador should not return an error when hostname is not present. 
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-no-secret
min_tls_version: v1.0
max_tls_version: v1.3
""")

        # Ambassador should return an error for this configuration:
        # hosts are set but no secret/cert is provided.
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-error
hosts:
- tls-context-host-1
""")

    def scheme(self) -> str:
        return "https"

    @staticmethod
    def _go_close_connection_error(url):
        """
        :param url: url passed to the query
        :return: error message string that Go's net/http package throws when server closes connection
        """
        return "Get {}: EOF".format(url)

    def queries(self):
        # NOTE: check() indexes self.results positionally, so the order of
        # these queries matters.

        # 0
        yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True)

        # 1 - Correct host #1
        yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True)

        # 2 - Correct host #2
        yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-2"}, expected=200, insecure=True, sni=True)

        # 3 - Incorrect host
        yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-3"},
                    # error=self._go_close_connection_error(self.url("tls-context-same/")),
                    expected=404, insecure=True)

        # 4 - Incorrect path, correct host
        yield Query(self.url("tls-context-different/"), headers={"Host": "tls-context-host-1"}, expected=404, insecure=True, sni=True)

        # Other mappings with no host will respond with the fallback cert.
        # 5 - no Host header, fallback cert from the TLS module
        yield Query(self.url(self.name + "/"),
                    # error=self._go_close_connection_error(self.url(self.name + "/")),
                    insecure=True)

        # 6 - explicit Host header, fallback cert
        yield Query(self.url(self.name + "/"),
                    # error=self._go_close_connection_error(self.url(self.name + "/")),
                    # sni=True,
                    headers={"Host": "tls-context-host-3"}, insecure=True)

        # 7 - explicit Host header 1 wins, we'll get the SNI cert for this overlapping path
        yield Query(self.url(self.name + "/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True)

        # 8 - explicit Host header 2 wins, we'll get the SNI cert for this overlapping path
        yield Query(self.url(self.name + "/"), headers={"Host": "tls-context-host-2"}, expected=200, insecure=True, sni=True)

    def check(self):
        # XXX Ew. If self.results[0].json is empty, the harness won't convert it to a response.
        errors = self.results[0].json
        num_errors = len(errors)
        assert num_errors == 2, "expected 2 errors, got {} -\n{}".format(num_errors, errors)

        # Both diagnostics come from the hosts-but-no-secret context above.
        cert_err = errors[0]
        pkey_err = errors[1]

        assert cert_err[1] == 'TLSContext TLSContextTest-same-context-error is missing cert_chain_file'
        assert pkey_err[1] == 'TLSContext TLSContextTest-same-context-error is missing private_key_file'

        idx = 0

        for result in self.results:
            if result.status == 200 and result.query.headers:
                host_header = result.query.headers['Host']
                tls_common_name = result.tls[0]['Issuer']['CommonName']

                # XXX Weirdness with the fallback cert here! You see, if we use host
                # tls-context-host-3 (or, really, anything except -1 or -2), then the
                # fallback cert actually has CN 'localhost'. We should replace this with
                # a real fallback cert, but for now, just hack the host_header.
                #
                # Ew.
                if host_header == 'tls-context-host-3':
                    host_header = 'localhost'

                assert host_header == tls_common_name, "test %d wanted CN %s, but got %s" % (idx, host_header, tls_common_name)

            idx += 1

    def requirements(self):
        # We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True))


class TLSContextProtocolMaxVersion(AmbassadorTest):
    # Here we're testing that the client can't exceed the maximum TLS version
    # configured.
    #
    # XXX 2019-09-11: vet that the test client's support for TLS v1.3 is up-to-date.
    # It appears not to be.
# debug = True def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFv
R0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: secret.max-version labels: kat-ambassador-id: tlscontextprotocolmaxversion type: kubernetes.io/tls """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module name: ambassador config: defaults: tls_secret_namespacing: False --- apiVersion: ambassador/v0 kind: Mapping name: {self.name}-same-prefix-1 prefix: /tls-context-same/ service: http://{self.target.path.fqdn} host: tls-context-host-1 --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-1 hosts: - tls-context-host-1 secret: secret.max-version min_tls_version: v1.1 max_tls_version: v1.2 """) def scheme(self) -> str: return "https" @staticmethod def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url) def queries(self): # ---- # XXX 2019-09-11 # These aren't actually reporting the negotiated version, alhough correct # behavior can be verified with a custom log format. What, does the silly thing just not # report the negotiated version if it's the max you've requested?? # # For now, we're checking for the None result, but, ew. # ---- yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.2", maxTLSv="v1.2") # This should give us TLS v1.1 yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.0", maxTLSv="v1.1") # This should be an error. 
        # Client pinned to TLS v1.3 while the context caps at v1.2, so the
        # handshake must fail; accept any of the client-side error spellings.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.3",
                    maxTLSv="v1.3",
                    error=[ "tls: server selected unsupported protocol version 303",
                            "tls: no supported versions satisfy MinVersion and MaxVersion",
                            "tls: protocol version not supported" ])

    def check(self):
        tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version
        tls_1_version = self.results[1].backend.request.tls.negotiated_protocol_version

        # See comment in queries for why these are None. They should be v1.2 and v1.1 respectively.
        assert tls_0_version == None, f"requesting TLS v1.2 got TLS {tls_0_version}"
        assert tls_1_version == None, f"requesting TLS v1.0-v1.1 got TLS {tls_1_version}"

    def requirements(self):
        # We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True, minTLSv="v1.2"))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True, minTLSv="v1.2"))


class TLSContextProtocolMinVersion(AmbassadorTest):
    # Here we're testing that the client can't drop below the minimum TLS version
    # configured.
    #
    # XXX 2019-09-11: vet that the test client's support for TLS v1.3 is up-to-date.
    # It appears not to be.
# debug = True def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFv
R0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: secret.min-version labels: kat-ambassador-id: tlscontextprotocolminversion type: kubernetes.io/tls """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.name}-same-prefix-1 prefix: /tls-context-same/ service: https://{self.target.path.fqdn} host: tls-context-host-1 --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-1 hosts: - tls-context-host-1 secret: secret.min-version secret_namespacing: False min_tls_version: v1.2 max_tls_version: v1.3 """) def scheme(self) -> str: return "https" @staticmethod def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url) def queries(self): # This should give v1.3, but it currently seems to give 1.2. yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.2", maxTLSv="v1.3") # This should give v1.2 yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.1", maxTLSv="v1.2") # This should be an error. 
        # Client pinned to TLS v1.0 while the context requires at least v1.2,
        # so the handshake must fail; accept any of the client-side error spellings.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.0",
                    maxTLSv="v1.0",
                    error=[ "tls: server selected unsupported protocol version 303",
                            "tls: no supported versions satisfy MinVersion and MaxVersion",
                            "tls: protocol version not supported" ])

    def check(self):
        tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version
        tls_1_version = self.results[1].backend.request.tls.negotiated_protocol_version

        # Hmmm. Why does Envoy prefer 1.2 to 1.3 here?? This may be a client thing -- have to
        # rebuild with Go 1.13.
        assert tls_0_version == "v1.2", f"requesting TLS v1.2-v1.3 got TLS {tls_0_version}"
        assert tls_1_version == "v1.2", f"requesting TLS v1.1-v1.2 got TLS {tls_1_version}"

    def requirements(self):
        # We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))


class TLSContextCipherSuites(AmbassadorTest):
    # debug = True

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Kubernetes TLS secret referenced by the cipher-suites TLSContext below.
        return super().manifests() + """
---
apiVersion: v1
data:
  tls.crt:
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFv
R0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: secret.cipher-suites labels: kat-ambassador-id: tlscontextciphersuites type: kubernetes.io/tls """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.name}-same-prefix-1 prefix: /tls-context-same/ service: https://{self.target.path.fqdn} host: tls-context-host-1 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-1 hosts: - tls-context-host-1 secret: secret.cipher-suites secret_namespacing: False max_tls_version: v1.2 cipher_suites: - ECDHE-RSA-AES128-GCM-SHA256 ecdh_curves: - P-256 """) def scheme(self) -> str: return "https" @staticmethod def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url) def queries(self): yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, cipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"], maxTLSv="v1.2") yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, cipherSuites=["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"], maxTLSv="v1.2", error="tls: handshake failure",) yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, cipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"], ecdhCurves=["X25519"], maxTLSv="v1.2", error="tls: handshake failure",) def check(self): tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version assert tls_0_version == "v1.2", 
f"requesting TLS v1.2 got TLS {tls_0_version}" def requirements(self): yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True)) yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
76.136364
2,291
0.859824
from kat.harness import Query from abstract_tests import AmbassadorTest, HTTP, ServiceType class TLSContextsTest(AmbassadorTest): def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 metadata: name: test-tlscontexts-secret labels: kat-ambassador-id: tlscontextstest data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1RENDQXFDZ0F3SUJBZ0lKQUowWDU3ZXlwQk5UTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4R3pBWkJnTlZCQU1NRW0xaGMzUmxjaTVrCllYUmhkMmx5WlM1cGJ6QWVGdzB4T1RBeE1UQXhPVEF6TXpCYUZ3MHlOREF4TURreE9UQXpNekJhTUhFeEN6QUoKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRSwpEQWhFWVhSaGQybHlaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEd6QVpCZ05WQkFNTUVtMWhjM1JsCmNpNWtZWFJoZDJseVpTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPdlEKVjVad1NmcmQ1Vndtelo5SmNoOTdyUW40OXA2b1FiNkVIWjF5T2EyZXZBNzE2NWpkMHFqS1BPMlgyRk80MVg4QgpwQWFLZExnMmltaC9wL2NXN2JncjNHNnRHVEZVMVZHanllTE1EV0Q1MGV2TTYydnpYOFRuYVV6ZFRHTjFOdTM2CnJaM2JnK0VLcjhFYjI1b2RabEpyMm1mNktSeDdTcjZzT1N4NlE1VHhSb3NycmZ0d0tjejI5cHZlMGQ4b0NiZGkKRFJPVlZjNXpBaW0zc2Nmd3VwRUJrQzYxdlpKMzhmaXYwRENYOVpna3BMdEZKUTllTEVQSEdKUGp5ZmV3alNTeQovbk52L21Sc2J6aUNtQ3R3Z3BmbFRtODljK3EzSWhvbUE1YXhZQVFjQ0NqOXBvNUhVZHJtSUJKR0xBTVZ5OWJ5CkZnZE50aFdBeHZCNHZmQXl4OXNDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkdUOVAvOHBQeGI3UVJVeFcvV2gKaXpkMnNnbEtNQjhHQTFVZEl3UVlNQmFBRkdUOVAvOHBQeGI3UVJVeFcvV2hpemQyc2dsS01BOEdBMVVkRXdFQgovd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS3NWT2Fyc01aSXhLOUpLUzBHVHNnRXNjYThqCllhTDg1YmFsbndBbnBxMllSMGNIMlhvd2dLYjNyM3VmbVRCNERzWS9RMGllaENKeTMzOUJyNjVQMVBKMGgvemYKZEZOcnZKNGlvWDVMWnc5YkowQVFORCtZUTBFK010dFppbE9DbHNPOVBCdm1tUEp1dWFlYVdvS2pWZnNOL1RjMAoycUxVM1pVMHo5bmhYeDZlOWJxYUZLSU1jYnFiVk9nS2p3V0ZpbDlkRG4vQ29KbGFUUzRJWjlOaHFjUzhYMXd0ClQybWQvSUtaaEtKc3A3VlBGeDU5ZWhuZ0VPakZocGhzd20xdDhnQWVxL1A
3SkhaUXlBUGZYbDNyZDFSQVJuRVIKQUpmVUxET2tzWFNFb2RTZittR0NrVWh1b2QvaDhMTUdXTFh6Q2d0SHBKMndaVHA5a1ZWVWtKdkpqSVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K kind: Secret type: Opaque """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module name: tls ambassador_id: {self.ambassador_id} config: upstream: enabled: True secret: test-tlscontexts-secret """) yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.target.path.k8s} prefix: /{self.name}/ service: {self.target.path.fqdn} """) def scheme(self) -> str: return "https" def queries(self): yield Query(self.url(self.name + "/"), error=['connection refused', 'connection reset by peer', 'EOF', 'request canceled']) def requirements(self): yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("http://")) class ClientCertificateAuthentication(AmbassadorTest): presto_crt = """ -----BEGIN CERTIFICATE----- MIIDYTCCAkkCCQCrK74a3GFhijANBgkqhkiG9w0BAQsFADBxMQswCQYDVQQGEwJV UzELMAkGA1UECAwCTUExDzANBgNVBAcMBkJvc3RvbjERMA8GA1UECgwIRGF0YXdp cmUxFDASBgNVBAsMC0VuZ2luZWVyaW5nMRswGQYDVQQDDBJtYXN0ZXIuZGF0YXdp cmUuaW8wIBcNMTkwMTEwMTkxOTUyWhgPMjExODEyMTcxOTE5NTJaMHIxCzAJBgNV BAYTAklOMQswCQYDVQQIDAJLQTESMBAGA1UEBwwJQmFuZ2Fsb3JlMQ8wDQYDVQQK DAZQcmVzdG8xFDASBgNVBAsMC0VuZ2luZWVyaW5nMRswGQYDVQQDDBJwcmVzdG8u ZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvPcFp hw5Ja67z23L4YCYTgNdw4eVh7EHyzOpmf3VGhvx/UtNMVOH7Dcf+I7QEyxtQeBiZ HOcThgr/k/wrAbMjdThRS8yJxRZgj79Li92pKkJbhLGsBeTuw8lBhtwyn85vEZrt TOWEjlXHHLlz1OHiSAfYChIGjenPu5sT++O1AAs15b/0STBxkrZHGVimCU6qEWqB PYVcGYqXdb90mbsuY5GAdAzUBCGQH/RLZAl8ledT+uzkcgHcF30gUT5Ik5Ks4l/V t+C6I52Y0S4aCkT38XMYKMiBh7XzpjJUnR0pW5TYS37wq6nnVFsNReaMKmbOWp1X 5wEjoRJqDrHtVvjDAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAI3LR5fS6D6yFa6b yl6+U/i44R3VYJP1rkee0s4C4WbyXHURTqQ/0z9wLU+0Hk57HI+7f5HO/Sr0q3B3 wuZih+TUbbsx5jZW5e++FKydFWpx7KY4MUJmePydEMoUaSQjHWnlAuv9PGp5ZZ30 t0lP/mVGNAeiXsILV8gRHnP6aV5XywK8c+828BQDRfizJ+uKYvnAJmqpn4aOOJh9 
csjrK52+RNebMT0VxZF4JYGd0k00au9CaciWpPk69C+A/7K/xtV4ZFtddVP9SldF ahmIu2g3fI5G+/2Oz8J+qX2B+QqT21/pOPKnMQU54BQ6bmI3fBM9B+2zm92FfgYH 9wgA5+Y= -----END CERTIFICATE----- """ presto_key = """ -----BEGIN RSA PRIVATE KEY----- MIIEoQIBAAKCAQEArz3BaYcOSWuu89ty+GAmE4DXcOHlYexB8szqZn91Rob8f1LT TFTh+w3H/iO0BMsbUHgYmRznE4YK/5P8KwGzI3U4UUvMicUWYI+/S4vdqSpCW4Sx rAXk7sPJQYbcMp/ObxGa7UzlhI5Vxxy5c9Th4kgH2AoSBo3pz7ubE/vjtQALNeW/ 9EkwcZK2RxlYpglOqhFqgT2FXBmKl3W/dJm7LmORgHQM1AQhkB/0S2QJfJXnU/rs 5HIB3Bd9IFE+SJOSrOJf1bfguiOdmNEuGgpE9/FzGCjIgYe186YyVJ0dKVuU2Et+ 8Kup51RbDUXmjCpmzlqdV+cBI6ESag6x7Vb4wwIDAQABAoIBAHfXwPS9Mw0NAoms kzS+9Gs0GqINKoTMQNGeR9Mu6XIBEJ62cuBp0F2TsCjiG9OHXzep2hCkDndwnQbq GnMC55KhMJGQR+IUEdiZldZBYaa1ysmxtpwRL94FsRYJ9377gP6+SHhutSvw90KD J2TKumu4nPym7mrjFHpHL6f8BF6b9dJftE2o27TX04+39kPiX4d+4CLfG7YFteYR 98qYHwAk58+s3jJxk7gaDehb0PvOIma02eLF7dNA7h0BtB2h2rfPLNlgKv2MN7k3 NxRHwXEzSCfK8rL8yxQLo4gOy3up+LU7LRERBIkpOyS5tkKcIGoG1w5zEB4sqJZC Me2ZbUkCgYEA4RGHtfYkecTIBwSCgdCqJYa1zEr35xbgqxOWF7DfjjMwfxeitdh+ U487SpDpoH68Rl/pnqQcHToQWRfLGXv0NZxsQDH5UulK2dLy2JfQSlFMWc0rQ210 v8F35GXohB3vi4Tfrl8wrkEBbCBoZDmp7MPZEGVGb0KVl+gU2u19CwUCgYEAx1Mt w6M8+bj3ZQ9Va9tcHSk9IVRKx0fklWY0/cmoGw5P2q/Yudd3CGupINGEA/lHqqW3 boxfdneYijOmTQO9/od3/NQRDdTrCRKOautts5zeJw7fUvls5/Iip5ZryR5mYqEz Q/yMffzZPYVPXR0E/HEnCjf8Vs+0dDa2QwAhDycCf0j4ZgeYxjq0kiW0UJvGC2Qf SNHzfGxv/md48jC8J77y2cZa42YRyuNMjOygDx75+BDZB+VnT7YqHSLFlBOvHH5F ONOXYD6BZMM6oYGXtvBha1+yJVS3KCMDltt2LuymyAN0ERF3y1CzwsJLv4y/JVie JsIqE6v+6oFVvW09kk0CgYEAuazRL7ILJfDYfAqJnxxLNVrp9/cmZXaiB02bRWIp N3Lgji1KbOu6lVx8wvaIzI7U5LDUK6WVc6y6qtqsKoe237hf3GPLsx/JBb2EbzL6 ENuq0aV4AToZ6gLTp1tm8oVgCLZzI/zI/r+fukBJispyj5n0LP+0D0YSqkMhC06+ fPcCgYB85vDLHorvbb8CYcIOvJxogMjXVasOfSLqtCkzICg4i6qCmLkXbs0qmDIz bIpIFzUdXu3tu+gPV6ab9dPmpj1M77yu7+QLL7zRy/1/EJaY/tFjWzcuF5tP7jKT UZCMWuBXFwTbeSQHESs5IWpSDxBGJbSNFmCeyo52Dw/fSYxUEg== -----END RSA PRIVATE KEY----- """ ca_cert = """ -----BEGIN CERTIFICATE----- MIIDuDCCAqCgAwIBAgIJAJ0X57eypBNTMA0GCSqGSIb3DQEBCwUAMHExCzAJBgNV 
BAYTAlVTMQswCQYDVQQIDAJNQTEPMA0GA1UEBwwGQm9zdG9uMREwDwYDVQQKDAhE YXRhd2lyZTEUMBIGA1UECwwLRW5naW5lZXJpbmcxGzAZBgNVBAMMEm1hc3Rlci5k YXRhd2lyZS5pbzAeFw0xOTAxMTAxOTAzMzBaFw0yNDAxMDkxOTAzMzBaMHExCzAJ BgNVBAYTAlVTMQswCQYDVQQIDAJNQTEPMA0GA1UEBwwGQm9zdG9uMREwDwYDVQQK DAhEYXRhd2lyZTEUMBIGA1UECwwLRW5naW5lZXJpbmcxGzAZBgNVBAMMEm1hc3Rl ci5kYXRhd2lyZS5pbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOvQ V5ZwSfrd5VwmzZ9Jch97rQn49p6oQb6EHZ1yOa2evA7165jd0qjKPO2X2FO41X8B pAaKdLg2imh/p/cW7bgr3G6tGTFU1VGjyeLMDWD50evM62vzX8TnaUzdTGN1Nu36 rZ3bg+EKr8Eb25odZlJr2mf6KRx7Sr6sOSx6Q5TxRosrrftwKcz29pve0d8oCbdi DROVVc5zAim3scfwupEBkC61vZJ38fiv0DCX9ZgkpLtFJQ9eLEPHGJPjyfewjSSy /nNv/mRsbziCmCtwgpflTm89c+q3IhomA5axYAQcCCj9po5HUdrmIBJGLAMVy9by FgdNthWAxvB4vfAyx9sCAwEAAaNTMFEwHQYDVR0OBBYEFGT9P/8pPxb7QRUxW/Wh izd2sglKMB8GA1UdIwQYMBaAFGT9P/8pPxb7QRUxW/Whizd2sglKMA8GA1UdEwEB /wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAKsVOarsMZIxK9JKS0GTsgEsca8j YaL85balnwAnpq2YR0cH2XowgKb3r3ufmTB4DsY/Q0iehCJy339Br65P1PJ0h/zf dFNrvJ4ioX5LZw9bJ0AQND+YQ0E+MttZilOClsO9PBvmmPJuuaeaWoKjVfsN/Tc0 2qLU3ZU0z9nhXx6e9bqaFKIMcbqbVOgKjwWFil9dDn/CoJlaTS4IZ9NhqcS8X1wt T2md/IKZhKJsp7VPFx59ehngEOjFhphswm1t8gAeq/P7JHZQyAPfXl3rd1RARnER AJfULDOksXSEodSf+mGCkUhuod/h8LMGWLXzCgtHpJ2wZTp9kVVUkJvJjIU= -----END CERTIFICATE----- """ def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 metadata: name: test-clientcert-client-secret labels: kat-ambassador-id: clientcertificateauthentication data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1RENDQXFDZ0F3SUJBZ0lKQUowWDU3ZXlwQk5UTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4R3pBWkJnTlZCQU1NRW0xaGMzUmxjaTVrCllYUmhkMmx5WlM1cGJ6QWVGdzB4T1RBeE1UQXhPVEF6TXpCYUZ3MHlOREF4TURreE9UQXpNekJhTUhFeEN6QUoKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRSwpEQWhFWVhSaGQybHlaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEd6QVpCZ05WQkFNTUVtMWhjM1JsCmNpNWtZWFJoZDJseVpTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPdlEKVjVad1NmcmQ1Vndtelo5SmNoOTdyUW40OXA2b1FiNkVIWjF5T2EyZXZBNzE2NWpkMHFqS1BPMlgyRk80MVg4QgpwQWFLZExnMmltaC9wL2NXN2JncjNHNnRHVEZVMVZHanllTE1EV0Q1MGV2TTYydnpYOFRuYVV6ZFRHTjFOdTM2CnJaM2JnK0VLcjhFYjI1b2RabEpyMm1mNktSeDdTcjZzT1N4NlE1VHhSb3NycmZ0d0tjejI5cHZlMGQ4b0NiZGkKRFJPVlZjNXpBaW0zc2Nmd3VwRUJrQzYxdlpKMzhmaXYwRENYOVpna3BMdEZKUTllTEVQSEdKUGp5ZmV3alNTeQovbk52L21Sc2J6aUNtQ3R3Z3BmbFRtODljK3EzSWhvbUE1YXhZQVFjQ0NqOXBvNUhVZHJtSUJKR0xBTVZ5OWJ5CkZnZE50aFdBeHZCNHZmQXl4OXNDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkdUOVAvOHBQeGI3UVJVeFcvV2gKaXpkMnNnbEtNQjhHQTFVZEl3UVlNQmFBRkdUOVAvOHBQeGI3UVJVeFcvV2hpemQyc2dsS01BOEdBMVVkRXdFQgovd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS3NWT2Fyc01aSXhLOUpLUzBHVHNnRXNjYThqCllhTDg1YmFsbndBbnBxMllSMGNIMlhvd2dLYjNyM3VmbVRCNERzWS9RMGllaENKeTMzOUJyNjVQMVBKMGgvemYKZEZOcnZKNGlvWDVMWnc5YkowQVFORCtZUTBFK010dFppbE9DbHNPOVBCdm1tUEp1dWFlYVdvS2pWZnNOL1RjMAoycUxVM1pVMHo5bmhYeDZlOWJxYUZLSU1jYnFiVk9nS2p3V0ZpbDlkRG4vQ29KbGFUUzRJWjlOaHFjUzhYMXd0ClQybWQvSUtaaEtKc3A3VlBGeDU5ZWhuZ0VPakZocGhzd20xdDhnQWVxL1A3SkhaUXlBUGZYbDNyZDFSQVJuRVIKQUpmVUxET2tzWFNFb2RTZittR0NrVWh1b2QvaDhMTUdXTFh6Q2d0SHBKMndaVHA5a1ZWVWtKdkpqSVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K kind: Secret type: Opaque --- apiVersion: v1 kind: Secret metadata: name: test-clientcert-server-secret labels: kat-ambassador-id: clientcertificateauthentication type: kubernetes.io/tls data: 
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaekNDQWs4Q0NRQ3JLNzRhM0dGaGlUQU5CZ2txaGtpRzl3MEJBUXNGQURCeE1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1RVRXhEekFOQmdOVkJBY01Ca0p2YzNSdmJqRVJNQThHQTFVRUNnd0lSR0YwWVhkcApjbVV4RkRBU0JnTlZCQXNNQzBWdVoybHVaV1Z5YVc1bk1Sc3dHUVlEVlFRRERCSnRZWE4wWlhJdVpHRjBZWGRwCmNtVXVhVzh3SGhjTk1Ua3dNVEV3TVRrd056TTRXaGNOTWprd01UQTNNVGt3TnpNNFdqQjZNUXN3Q1FZRFZRUUcKRXdKSlRqRUxNQWtHQTFVRUNBd0NTMEV4RWpBUUJnTlZCQWNNQ1VKaGJtZGhiRzl5WlRFVE1CRUdBMVVFQ2d3SwpRVzFpWVhOellXUnZjakVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEh6QWRCZ05WQkFNTUZtRnRZbUZ6CmMyRmtiM0l1WlhoaGJYQnNaUzVqYjIwd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFDN1liY3o5SkZOSHVYY3pvZERrTURvUXd0M1pmQnpjaElwTFlkeHNDZnB1UUYybGNmOGxXMEJKNnZlNU0xTAovMjNZalFYeEFsV25VZ3FZdFlEL1hiZGh3RCtyRWx3RXZWUzR1US9IT2EyUTUwVkF6SXNYa0lxWm00dVA1QzNECk8rQ0NncXJ3UUgzYS8vdlBERldYWkUyeTJvcUdZdE1Xd20zVXQrYnFWSFEzOThqcTNoaGt3MmNXL0pLTjJkR2UKRjk0OWxJWG15NHMrbGE3b21RWldWY0JFcWdQVzJDL1VrZktSbVdsVkRwK0duSk8vZHFobDlMN3d2a2hhc2JETAphbVkweXdiOG9LSjFRdmlvV1JxcjhZZnQ5NzVwaGgzazRlRVdMMUNFTmxFK09vUWNTNVRPUEdndko3WlMyaU43CllVTDRBK0gydCt1WWdUdnFSYVNqcTdnckFnTUJBQUV3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJURGJ4MzkKUGpoT2JpVW1Rdm9vbVhOVjJ1TG1FZkxJcGlKQUhWOTM0VTlmMnhVUS93eExkcElhVXM0WTlRSzhOR2h2U3dSSAp4Y2w4R2hGYzBXRDRoNEJTdmNhdUdVS21LRzh5ZVFhdGhGVjBzcGFHYjUvaFBqUVdDWnNYK3crbjU4WDROOHBrCmx5YkE4akZGdUZlb3R3Z1l6UUhzQUppU29DbW9OQ0ZkaE4xT05FS1FMY1gxT2NRSUFUd3JVYzRBRkw2Y0hXZ1MKb1FOc3BTMlZIbENsVkpVN0E3Mkh4R3E5RFVJOWlaMmYxVnc1Rmpod0dxalBQMDJVZms1Tk9RNFgzNWlrcjlDcApyQWtJSnh1NkZPUUgwbDBmZ3VNUDlsUFhJZndlMUowQnNLZHRtd2wvcHp0TVV5dW5TbURVWEgyR1l5YmdQTlQyCnNMVFF1RFZaR0xmbFJUdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdTJHM00vU1JUUjdsM002SFE1REE2RU1MZDJYd2MzSVNLUzJIY2JBbjZia0JkcFhICi9KVnRBU2VyM3VUTlMvOXQySTBGOFFKVnAxSUttTFdBLzEyM1ljQS9xeEpjQkwxVXVMa1B4em10a09kRlFNeUwKRjVDS21adUxqK1F0d3p2Z2dvS3E4RUI5MnYvN3p3eFZsMlJOc3RxS2htTFRGc0p0MUxmbTZsUjBOL2ZJNnQ0WQpaTU5uRnZ5U2pkblJuaGZlUFpTRjVzdUxQcFd1NkprR1ZsWEFSS29EMXRndjFKSHlrWmxwVlE2ZmhweVR2M2FvClpmUys4TDVJV3JHd3kycG1OTXNHL0tDaWRVTDRxRmthcS9HSDdmZSthWVlkNU9IaEZpOVFoRFpSUGpxRUhFdVUKemp4b0x5ZTJVdG9qZTJGQytBUGg5cmZybUlFNzZrV2tvNnU0S3dJREFRQUJBb0lCQVFDbmZrZjViQko1Z2pYcgpzcnliKzRkRDFiSXBMdmpJNk4wczY2S1hUK1BOZW03QlprOVdDdWRkMGUxQ2x2aWZoeG5VS1BKM3BTT1ZKYk9OCkh5aklteWV4ZTl3dGVZTEJSYysyTXMzVXdrelFLcm52bXlaMWtPRWpQek40RW5tSmV6dEt6YXdvaHkwNGxmcXEKNzVhT2RiMHlNMEVCc05LSkZKQ0NSVVJtajhrMndJQXIwbHFhV0ZNcGlYT3FzTXBvWTZMY3plaGlMZHU0bUFaSQpRRHhCM3dLVGpmdGNIdzcxTmFKZlg5V2t2OFI4ZWlqeWpNOUl2Y1cwZmRQem9YVTBPZEFTa09ZRlFIZHlCUFNiCjllNWhDSGFJczZia1hBOEs4YmZRazBSL0d6STcyVXArd0JrbnJnTlhZTXFudHJSa0ljNURER1g0b3VOc2lqUkoKSWtrWER2TjVBb0dCQU8veFQrNTYyQ2hwc3R2NUpvMi9ycFdGb05tZ3ZJT0RMRGxiamhHZEpqKytwNk1BdjFQWgo2d042WnozMmppUG1OYzdCK2hrQm40RFQvVkFpU3NLRG1SK09tUkg1TVNzQXh6aWRxU3lNcldxdG1lMDNBVzd6Cklja0FNTGdwWHhDdW1HMzRCM2Jxb3VUdGVRdm5WcmRlR2hvdUJ5OUJSMVpXbnRtWHVscVhyNUFmQW9HQkFNZnIKN29NVGwzdUVVeml5a0IzYmkxb0RYdUNjN01Qc3h0c1IwdElqZXc3RStwTGoyaUxXZUZuMGVhdnJYaHQ1ODRJbwpDZG90a1ZMMHhrZ1g3M2ZremxEd1hobTJVTXBaQmxzSzBnR09SaUYzd0ZMU0hJNmxRUmJkaXRIb0JqcDRGTEZzCitlanZKUDZ1ZitBekZ5cjBLTnc3TnpyaCthbFhFQ09RS2NqUXJlWjFBb0dBQXRLZzhScEszcmJYbnRUZ2lqeGUKRG01REJTeHA2MVlvdUFnR3ROaFhjZHFKV0ZhUzZhYWZxQ3ZSZVI0a2IvR3VZbDlQMU9sNitlWUVqZVBKWTE1dQo5N3NTdSs1bGtLN3lxUXpaeDZka0J1UkI4bE42VmRiUVorL3pvc2NCMGsxcmg2ZXFWdEROMThtZmFlOXZ5cnAxCnJpY3FlSGpaSVAvbDRJTnpjc3RrQ2xzQ2dZQmh5TVZkZVZ5emZuS1NIY3lkdmY5MzVJUW9pcmpIeiswbnc1MEIKU1hkc0x1NThvRlBXakY1TGFXZUZybGJXUzV6T1FiVW44UGZPd29pbFJJZk5kYTF3SzFGcmRDQXFDTWN5Q3FYVApPdnFVYmhVMHJTNW9tdTJ1T0dnbzZUcjZxRGMrM1JXVFdEMFpFTkxkSDBBcXMwZTFDSVdvR0ZWYi9ZaVlUSEFUCmwvWW03UUtCZ1FEcFYvSjRMakY5VzBlUlNXenFBaDN1TStCdzNN
N2NEMUxnUlZ6ZWxGS2w2ZzRBMWNvdU8wbHAKalpkMkVMZDlzTHhBVENVeFhQZ0dDTjY0RVNZSi92ZUozUmJzMTMrU2xqdjRleTVKck1ieEhNRC9CU1ovY2VjaAp4aFNWNkJsMHVKb2tlMTRPMEJ3OHJzSUlxZTVZSUxqSlMwL2E2eTllSlJtaGZJVG9PZU5PTUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module ambassador_id: {self.ambassador_id} name: tls config: server: enabled: True secret: test-clientcert-server-secret client: enabled: True secret: test-clientcert-client-secret cert_required: True """) yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.target.path.k8s} prefix: /{self.name}/ service: {self.target.path.fqdn} """) def scheme(self) -> str: return "https" def queries(self): yield Query(self.url(self.name + "/"), insecure=True, client_crt=self.presto_crt, client_key=self.presto_key, client_cert_required=True, ca_cert=self.ca_cert) yield Query(self.url(self.name + "/"), insecure=True, error="handshake failure") def requirements(self): for r in super().requirements(): query = r[1] query.insecure = True query.client_cert = self.presto_crt query.client_key = self.presto_key query.client_cert_required = True query.ca_cert = self.ca_cert yield (r[0], query) class TLSOriginationSecret(AmbassadorTest): def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 kind: Secret metadata: name: test-origination-secret labels: kat-ambassador-id: tlsoriginationsecret type: kubernetes.io/tls data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFv
R0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module ambassador_id: {self.ambassador_id} name: tls config: upstream: secret: test-origination-secret upstream-files: cert_chain_file: /tmp/ambassador/snapshots/default/secrets-decoded/test-origination-secret/F94E4DCF30ABC50DEF240AA8024599B67CC03991.crt private_key_file: /tmp/ambassador/snapshots/default/secrets-decoded/test-origination-secret/F94E4DCF30ABC50DEF240AA8024599B67CC03991.key """) yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.target.path.k8s} prefix: /{self.name}/ service: {self.target.path.fqdn} tls: upstream """) yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.target.path.k8s}-files prefix: /{self.name}-files/ service: {self.target.path.fqdn} tls: upstream-files """) def queries(self): yield Query(self.url(self.name + "/")) yield Query(self.url(self.name + "-files/")) def check(self): for r in self.results: assert r.backend.request.tls.enabled class TLS(AmbassadorTest): target: ServiceType def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 kind: Secret metadata: name: test-tls-secret labels: kat-ambassador-id: tls type: kubernetes.io/tls data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFv
R0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K --- apiVersion: v1 kind: Secret metadata: name: ambassador-certs labels: kat-ambassador-id: tls type: kubernetes.io/tls data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04
QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRz
J1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module name: tls ambassador_id: {self.ambassador_id} config: server: enabled: True secret: test-tls-secret """) # need to include the ambassador_id unless you need some special # ambassador_id that isn't something that kat already knows about. # of mangling for the mapping name and prefix. For this simple test, # it's not necessary. yield self.target, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: tls_target_mapping prefix: /tls-target/ service: {self.target.path.fqdn} """) def scheme(self) -> str: return "https" def queries(self): yield Query(self.url("tls-target/"), insecure=True) class TLSInvalidSecret(AmbassadorTest): target: ServiceType def init(self): self.target = HTTP() def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module name: tls ambassador_id: {self.ambassador_id} config: server: enabled: True secret: test-certs-secret-invalid missing-secret-key: cert_chain_file: /nonesuch bad-path-info: cert_chain_file: /nonesuch private_key_file: /nonesuch validation-without-termination: enabled: True secret: test-certs-secret-invalid ca_secret: ambassador-certs """) yield self.target, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: tls_target_mapping prefix: /tls-target/ service: {self.target.path.fqdn} """) def scheme(self) -> str: return "http" def queries(self): yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), phase=2) def check(self): 
errors = self.results[0].backend.response expected = set({ "TLSContext server found no certificate in secret test-certs-secret-invalid in namespace default, ignoring...", "TLSContext bad-path-info found no cert_chain_file '/nonesuch'", "TLSContext bad-path-info found no private_key_file '/nonesuch'", "TLSContext validation-without-termination found no certificate in secret test-certs-secret-invalid in namespace default, ignoring...", "TLSContext missing-secret-key: 'cert_chain_file' requires 'private_key_file' as well", }) current = set({}) for errsvc, errtext in errors: current.add(errtext) diff = expected - current assert len(diff) == 0, f'expected {len(expected)} errors, got {len(errors)}: Missing {diff}' class TLSContextTest(AmbassadorTest): def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 kind: Namespace metadata: name: secret-namespace --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldR
SnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UH
VwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: test-tlscontext-secret-0 labels: kat-ambassador-id: tlscontext type: kubernetes.io/tls --- apiVersion: v1 data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFv
R0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: test-tlscontext-secret-1 namespace: secret-namespace labels: kat-ambassador-id: tlscontext type: kubernetes.io/tls --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUlIWTY3cFNoZ3NyTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB5TUI0WERURTRNVEV3TVRFME1EUXhObG9YCkRUSTRNVEF5T1RFME1EUXhObG93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRJd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURjQThZdGgvUFdhT0dTCm9ObXZFSFoyNGpRN1BLTitENG93TEhXZWl1UmRtaEEwWU92VTN3cUczVnFZNFpwbFpBVjBQS2xELysyWlNGMTQKejh3MWVGNFFUelphWXh3eTkrd2ZITmtUREVwTWpQOEpNMk9FYnlrVVJ4VVJ2VzQrN0QzMEUyRXo1T1BseG1jMApNWU0vL0pINUVEUWhjaURybFlxZTFTUk1SQUxaZVZta2FBeXU2TkhKVEJ1ajBTSVB1ZExUY2grOTBxK3Jkd255CmZrVDF4M09UYW5iV2pub21FSmU3TXZ5NG12dnFxSUh1NDhTOUM4WmQxQkdWUGJ1OFYvVURyU1dROXpZQ1g0U0cKT2FzbDhDMFhtSDZrZW1oUERsRC9UdjB4dnlINXE1TVVjSGk0bUp0Titnem9iNTREd3pWR0VqZWY1TGVTMVY1RgowVEFQMGQrWEFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFmCkJnTlZIU01FR0RBV2dCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBbUFLYkNsdUhFZS9JRmJ1QWJneDBNenV6aTkwd2xtQVBiOGdtTwpxdmJwMjl1T1ZzVlNtUUFkZFBuZEZhTVhWcDFaaG1UVjVDU1F0ZFgyQ1ZNVyswVzQ3Qy9DT0Jkb1NFUTl5akJmCmlGRGNseG04QU4yUG1hR1FhK3hvT1hnWkxYZXJDaE5LV0JTWlIrWktYTEpTTTlVYUVTbEhmNXVuQkxFcENqK2oKZEJpSXFGY2E3eElGUGtyKzBSRW9BVmMveFBubnNhS2pMMlV5Z0dqUWZGTnhjT042Y3VjYjZMS0pYT1pFSVRiNQpINjhKdWFSQ0tyZWZZK0l5aFFWVk5taWk3dE1wY
1UyS2pXNXBrVktxVTNkS0l0RXEyVmtTZHpNVUtqTnhZd3FGCll6YnozNFQ1MENXbm9HbU5SQVdKc0xlVmlPWVUyNmR3YkFXZDlVYitWMDFRam43OAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktrd2dnU2xBZ0VBQW9JQkFRRGNBOFl0aC9QV2FPR1MKb05tdkVIWjI0alE3UEtOK0Q0b3dMSFdlaXVSZG1oQTBZT3ZVM3dxRzNWcVk0WnBsWkFWMFBLbEQvKzJaU0YxNAp6OHcxZUY0UVR6WmFZeHd5OSt3ZkhOa1RERXBNalA4Sk0yT0VieWtVUnhVUnZXNCs3RDMwRTJFejVPUGx4bWMwCk1ZTS8vSkg1RURRaGNpRHJsWXFlMVNSTVJBTFplVm1rYUF5dTZOSEpUQnVqMFNJUHVkTFRjaCs5MHErcmR3bnkKZmtUMXgzT1RhbmJXam5vbUVKZTdNdnk0bXZ2cXFJSHU0OFM5QzhaZDFCR1ZQYnU4Vi9VRHJTV1E5ellDWDRTRwpPYXNsOEMwWG1INmtlbWhQRGxEL1R2MHh2eUg1cTVNVWNIaTRtSnROK2d6b2I1NER3elZHRWplZjVMZVMxVjVGCjBUQVAwZCtYQWdNQkFBRUNnZ0VCQUk2U3I0anYwZForanJhN0gzVnZ3S1RYZnl0bjV6YVlrVjhZWUh3RjIyakEKbm9HaTBSQllIUFU2V2l3NS9oaDRFWVM2anFHdkptUXZYY3NkTldMdEJsK2hSVUtiZVRtYUtWd2NFSnRrV24xeQozUTQwUytnVk5OU2NINDRvYUZuRU0zMklWWFFRZnBKMjJJZ2RFY1dVUVcvWnpUNWpPK3dPTXc4c1plSTZMSEtLCkdoOENsVDkrRGUvdXFqbjNCRnQwelZ3cnFLbllKSU1DSWFrb2lDRmtIcGhVTURFNVkyU1NLaGFGWndxMWtLd0sKdHFvWFpKQnlzYXhnUTFRa21mS1RnRkx5WlpXT01mRzVzb1VrU1RTeURFRzFsYnVYcHpUbTlVSTlKU2lsK01yaAp1LzVTeXBLOHBCSHhBdFg5VXdiTjFiRGw3Sng1SWJyMnNoM0F1UDF4OUpFQ2dZRUE4dGNTM09URXNOUFpQZlptCk9jaUduOW9STTdHVmVGdjMrL05iL3JodHp1L1RQUWJBSzhWZ3FrS0dPazNGN1krY2txS1NTWjFnUkF2SHBsZEIKaTY0Y0daT1dpK01jMWZVcEdVV2sxdnZXbG1nTUlQVjVtbFpvOHowMlNTdXhLZTI1Y2VNb09oenFlay9vRmFtdgoyTmxFeTh0dEhOMUxMS3grZllhMkpGcWVycThDZ1lFQTUvQUxHSXVrU3J0K0dkektJLzV5cjdSREpTVzIzUTJ4CkM5ZklUTUFSL1Q4dzNsWGhyUnRXcmlHL3l0QkVPNXdTMVIwdDkydW1nVkhIRTA5eFFXbzZ0Tm16QVBNb1RSekMKd08yYnJqQktBdUJkQ0RISjZsMlFnOEhPQWovUncrK2x4bEN0VEI2YS8xWEZIZnNHUGhqMEQrWlJiWVZzaE00UgpnSVVmdmpmQ1Y1a0NnWUVBMzdzL2FieHJhdThEaTQ3a0NBQ3o1N3FsZHBiNk92V2d0OFF5MGE5aG0vSmhFQ3lVCkNML0VtNWpHeWhpMWJuV05yNXVRWTdwVzR0cG5pdDJCU2d1VFlBMFYrck8zOFhmNThZcTBvRTFPR3l5cFlBUkoKa09SanRSYUVXVTJqNEJsaGJZZjNtL0xnSk9oUnp3T1RPNXFSUTZHY1dhZVlod1ExVmJrelByTXUxNGtDZ1lCbwp4dEhjWnNqelVidm5wd3hTTWxKUStaZ1RvZlAzN0lWOG1pQk1POEJrclRWQVczKzFtZEl
RbkFKdWRxTThZb2RICmF3VW03cVNyYXV3SjF5dU1wNWFadUhiYkNQMjl5QzVheFh3OHRtZlk0TTVtTTBmSjdqYW9ydGFId1pqYmNObHMKdTJsdUo2MVJoOGVpZ1pJU1gyZHgvMVB0ckFhWUFCZDcvYWVYWU0wVWtRS0JnUUNVbkFIdmRQUGhIVnJDWU1rTgpOOFBEK0t0YmhPRks2S3MvdlgyUkcyRnFmQkJPQWV3bEo1d0xWeFBLT1RpdytKS2FSeHhYMkcvREZVNzduOEQvCkR5V2RjM2ZCQWQ0a1lJamZVaGRGa1hHNEFMUDZBNVFIZVN4NzNScTFLNWxMVWhPbEZqc3VPZ0NKS28wVlFmRC8KT05paDB6SzN5Wmc3aDVQamZ1TUdGb09OQWc9PQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== kind: Secret metadata: name: test-tlscontext-secret-2 labels: kat-ambassador-id: tlscontext type: kubernetes.io/tls """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.name}-same-prefix-1 prefix: /tls-context-same/ service: http://{self.target.path.fqdn} host: tls-context-host-1 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-1 hosts: - tls-context-host-1 secret: test-tlscontext-secret-1.secret-namespace min_tls_version: v1.0 max_tls_version: v1.3 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: Mapping name: {self.name}-same-prefix-2 prefix: /tls-context-same/ service: http://{self.target.path.fqdn} host: tls-context-host-2 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-2 hosts: - tls-context-host-2 secret: test-tlscontext-secret-2 alpn_protocols: h2,http/1.1 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: Module name: tls config: server: enabled: True secret: test-tlscontext-secret-0 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: Mapping name: {self.name}-other-mapping prefix: /{self.name}/ service: https://{self.target.path.fqdn} """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-no-secret min_tls_version: v1.0 max_tls_version: v1.3 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: TLSContext name: 
{self.name}-same-context-error hosts: - tls-context-host-1 """) def scheme(self) -> str: return "https" @staticmethod def _go_close_connection_error(url): return "Get {}: EOF".format(url) def queries(self): yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True) yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True) yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-2"}, expected=200, insecure=True, sni=True) yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-3"}, expected=404, insecure=True) yield Query(self.url("tls-context-different/"), headers={"Host": "tls-context-host-1"}, expected=404, insecure=True, sni=True) yield Query(self.url(self.name + "/"), insecure=True) yield Query(self.url(self.name + "/"), headers={"Host": "tls-context-host-3"}, insecure=True) yield Query(self.url(self.name + "/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True) # 7 - explicit Host header 2 wins, we'll get the SNI cert for this overlapping path yield Query(self.url(self.name + "/"), headers={"Host": "tls-context-host-2"}, expected=200, insecure=True, sni=True) def check(self): errors = self.results[0].json num_errors = len(errors) assert num_errors == 2, "expected 2 errors, got {} -\n{}".format(num_errors, errors) cert_err = errors[0] pkey_err = errors[1] assert cert_err[1] == 'TLSContext TLSContextTest-same-context-error is missing cert_chain_file' assert pkey_err[1] == 'TLSContext TLSContextTest-same-context-error is missing private_key_file' idx = 0 for result in self.results: if result.status == 200 and result.query.headers: host_header = result.query.headers['Host'] tls_common_name = result.tls[0]['Issuer']['CommonName'] # XXX Weirdness with the fallback cert here! 
You see, if we use host # tls-context-host-3 (or, really, anything except -1 or -2), then the # fallback cert actually has CN 'localhost'. We should replace this with # a real fallback cert, but for now, just hack the host_header. # # Ew. if host_header == 'tls-context-host-3': host_header = 'localhost' assert host_header == tls_common_name, "test %d wanted CN %s, but got %s" % (idx, host_header, tls_common_name) idx += 1 def requirements(self): # We're replacing super()'s requirements deliberately here. Without a Host header they can't work. yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True)) yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True)) yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True)) yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True)) class TLSContextProtocolMaxVersion(AmbassadorTest): # It appears not to be. 
# debug = True def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFv
R0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: secret.max-version labels: kat-ambassador-id: tlscontextprotocolmaxversion type: kubernetes.io/tls """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Module name: ambassador config: defaults: tls_secret_namespacing: False --- apiVersion: ambassador/v0 kind: Mapping name: {self.name}-same-prefix-1 prefix: /tls-context-same/ service: http://{self.target.path.fqdn} host: tls-context-host-1 --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-1 hosts: - tls-context-host-1 secret: secret.max-version min_tls_version: v1.1 max_tls_version: v1.2 """) def scheme(self) -> str: return "https" @staticmethod def _go_close_connection_error(url): return "Get {}: EOF".format(url) def queries(self): # ---- # XXX 2019-09-11 # These aren't actually reporting the negotiated version, alhough correct # ---- yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.2", maxTLSv="v1.2") # This should give us TLS v1.1 yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.0", maxTLSv="v1.1") # This should be an error. 
yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.3", maxTLSv="v1.3", error=[ "tls: server selected unsupported protocol version 303", "tls: no supported versions satisfy MinVersion and MaxVersion", "tls: protocol version not supported" ]) def check(self): tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version tls_1_version = self.results[1].backend.request.tls.negotiated_protocol_version # See comment in queries for why these are None. They should be v1.2 and v1.1 respectively. assert tls_0_version == None, f"requesting TLS v1.2 got TLS {tls_0_version}" assert tls_1_version == None, f"requesting TLS v1.0-v1.1 got TLS {tls_1_version}" def requirements(self): # We're replacing super()'s requirements deliberately here. Without a Host header they can't work. yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True, minTLSv="v1.2")) yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True, minTLSv="v1.2")) class TLSContextProtocolMinVersion(AmbassadorTest): # It appears not to be. 
# debug = True def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFv
R0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: secret.min-version labels: kat-ambassador-id: tlscontextprotocolminversion type: kubernetes.io/tls """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.name}-same-prefix-1 prefix: /tls-context-same/ service: https://{self.target.path.fqdn} host: tls-context-host-1 --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-1 hosts: - tls-context-host-1 secret: secret.min-version secret_namespacing: False min_tls_version: v1.2 max_tls_version: v1.3 """) def scheme(self) -> str: return "https" @staticmethod def _go_close_connection_error(url): return "Get {}: EOF".format(url) def queries(self): # This should give v1.3, but it currently seems to give 1.2. yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.2", maxTLSv="v1.3") # This should give v1.2 yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.1", maxTLSv="v1.2") # This should be an error. yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, minTLSv="v1.0", maxTLSv="v1.0", error=[ "tls: server selected unsupported protocol version 303", "tls: no supported versions satisfy MinVersion and MaxVersion", "tls: protocol version not supported" ]) def check(self): tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version tls_1_version = self.results[1].backend.request.tls.negotiated_protocol_version # Hmmm. Why does Envoy prefer 1.2 to 1.3 here?? 
This may be a client thing -- have to # rebuild with Go 1.13. assert tls_0_version == "v1.2", f"requesting TLS v1.2-v1.3 got TLS {tls_0_version}" assert tls_1_version == "v1.2", f"requesting TLS v1.1-v1.2 got TLS {tls_1_version}" def requirements(self): # We're replacing super()'s requirements deliberately here. Without a Host header they can't work. yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True)) yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True)) class TLSContextCipherSuites(AmbassadorTest): def init(self): self.target = HTTP() def manifests(self) -> str: return super().manifests() + """ --- apiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0N
mUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllie
Ap3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K kind: Secret metadata: name: secret.cipher-suites labels: kat-ambassador-id: tlscontextciphersuites type: kubernetes.io/tls """ def config(self): yield self, self.format(""" --- apiVersion: ambassador/v0 kind: Mapping name: {self.name}-same-prefix-1 prefix: /tls-context-same/ service: https://{self.target.path.fqdn} host: tls-context-host-1 """) yield self, self.format(""" --- apiVersion: ambassador/v1 kind: TLSContext name: {self.name}-same-context-1 hosts: - tls-context-host-1 secret: secret.cipher-suites secret_namespacing: False max_tls_version: v1.2 cipher_suites: - ECDHE-RSA-AES128-GCM-SHA256 ecdh_curves: - P-256 """) def scheme(self) -> str: return "https" @staticmethod def _go_close_connection_error(url): return "Get {}: EOF".format(url) def queries(self): yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, cipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"], maxTLSv="v1.2") yield Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, cipherSuites=["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"], maxTLSv="v1.2", error="tls: handshake failure",) yield 
Query(self.url("tls-context-same/"), headers={"Host": "tls-context-host-1"}, expected=200, insecure=True, sni=True, cipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"], ecdhCurves=["X25519"], maxTLSv="v1.2", error="tls: handshake failure",) def check(self): tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version assert tls_0_version == "v1.2", f"requesting TLS v1.2 got TLS {tls_0_version}" def requirements(self): yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True)) yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
true
true
1c45fc1d3bc956ee5253c530f021440b4f006f32
45,154
py
Python
hangups/ui/__main__.py
zetorian/hangups
60715702fc23842a94c8d13e144a8bd0ce45654a
[ "MIT" ]
null
null
null
hangups/ui/__main__.py
zetorian/hangups
60715702fc23842a94c8d13e144a8bd0ce45654a
[ "MIT" ]
null
null
null
hangups/ui/__main__.py
zetorian/hangups
60715702fc23842a94c8d13e144a8bd0ce45654a
[ "MIT" ]
null
null
null
"""Reference chat client for hangups.""" import appdirs import asyncio import configargparse import contextlib import logging import os import sys import urwid import readlike import hangups from hangups.ui.emoticon import replace_emoticons from hangups.ui import notifier from hangups.ui.utils import get_conv_name, add_color_to_scheme # hangups used to require a fork of urwid called hangups-urwid which may still # be installed and create a conflict with the 'urwid' package name. See #198. if urwid.__version__ == '1.2.2-dev': sys.exit('error: hangups-urwid package is installed\n\n' 'Please uninstall hangups-urwid and urwid, and reinstall ' 'hangups.') LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' COL_SCHEMES = { # Very basic scheme with no colour 'default': { ('active_tab', '', ''), ('inactive_tab', 'standout', ''), ('msg_date', '', ''), ('msg_sender', '', ''), ('msg_self', '', ''), ('msg_text', '', ''), ('msg_text_self', '', ''), ('msg_selected', 'standout', ''), ('status_line', 'standout', ''), ('tab_background', 'standout', ''), }, 'solarized-dark': { ('active_tab', 'light gray', 'light blue'), ('inactive_tab', 'underline', 'light green'), ('msg_date', 'dark cyan', ''), ('msg_sender', 'dark blue', ''), ('msg_text_self', '', ''), ('msg_self', 'dark green', ''), ('msg_text', '', ''), ('msg_selected', 'standout', ''), ('status_line', 'standout', ''), ('tab_background', 'black,standout,underline', 'light green'), }, } COL_SCHEME_NAMES = ( 'active_tab', 'inactive_tab', 'msg_date', 'msg_sender', 'msg_self', 'msg_text', 'msg_text_self', 'status_line', 'tab_background' ) DISCREET_NOTIFICATION = notifier.Notification( 'hangups', 'Conversation', 'New message' ) class HangupsDisconnected(Exception): """Raised when hangups is disconnected.""" class ChatUI(object): """User interface for hangups.""" def __init__(self, refresh_token_path, keybindings, palette, palette_colors, datetimefmt, notifier_, discreet_notifications): """Start the user interface.""" 
self._keys = keybindings self._datetimefmt = datetimefmt self._notifier = notifier_ self._discreet_notifications = discreet_notifications set_terminal_title('hangups') # These are populated by on_connect when it's called. self._conv_widgets = {} # {conversation_id: ConversationWidget} self._tabbed_window = None # TabbedWindowWidget self._conv_list = None # hangups.ConversationList self._user_list = None # hangups.UserList self._coroutine_queue = CoroutineQueue() self._exception = None # TODO Add urwid widget for getting auth. try: cookies = hangups.auth.get_auth_stdin(refresh_token_path) except hangups.GoogleAuthError as e: sys.exit('Login failed ({})'.format(e)) self._client = hangups.Client(cookies) self._client.on_connect.add_observer(self._on_connect) loop = asyncio.get_event_loop() loop.set_exception_handler(self._exception_handler) try: self._urwid_loop = urwid.MainLoop( LoadingWidget(), palette, handle_mouse=False, input_filter=self._input_filter, event_loop=urwid.AsyncioEventLoop(loop=loop) ) except urwid.AttrSpecError as e: # Fail gracefully for invalid colour options. sys.exit(e) self._urwid_loop.screen.set_terminal_properties(colors=palette_colors) self._urwid_loop.start() coros = [self._connect(), self._coroutine_queue.consume()] # Enable bracketed paste mode after the terminal has been switched to # the alternate screen (after MainLoop.start() to work around bug # 729533 in VTE. with bracketed_paste_mode(): try: # Run all the coros, until they all complete or one raises an # exception. In the normal case, HangupsDisconnected will be # raised. loop.run_until_complete(asyncio.gather(*coros)) except HangupsDisconnected: pass finally: # Clean up urwid. self._urwid_loop.stop() # Cancel all of the coros, and wait for them to shut down. 
task = asyncio.gather(*coros, return_exceptions=True) task.cancel() try: loop.run_until_complete(task) except asyncio.CancelledError: # In Python 3.7, asyncio.gather no longer swallows # CancelledError, so we need to ignore it. pass loop.close() # If an exception was stored, raise it now. This is used for exceptions # originating in urwid callbacks. if self._exception: raise self._exception # pylint: disable=raising-bad-type async def _connect(self): await self._client.connect() raise HangupsDisconnected() def _exception_handler(self, _loop, context): """Handle exceptions from the asyncio loop.""" # Start a graceful shutdown. self._coroutine_queue.put(self._client.disconnect()) # Store the exception to be re-raised later. If the context doesn't # contain an exception, create one containing the error message. default_exception = Exception(context.get('message')) self._exception = context.get('exception', default_exception) def _input_filter(self, keys, _): """Handle global keybindings.""" if keys == [self._keys['menu']]: if self._urwid_loop.widget == self._tabbed_window: self._show_menu() else: self._hide_menu() elif keys == [self._keys['quit']]: self._coroutine_queue.put(self._client.disconnect()) else: return keys def _show_menu(self): """Show the overlay menu.""" # If the current widget in the TabbedWindowWidget has a menu, # overlay it on the TabbedWindowWidget. 
current_widget = self._tabbed_window.get_current_widget() if hasattr(current_widget, 'get_menu_widget'): menu_widget = current_widget.get_menu_widget(self._hide_menu) overlay = urwid.Overlay(menu_widget, self._tabbed_window, align='center', width=('relative', 80), valign='middle', height=('relative', 80)) self._urwid_loop.widget = overlay def _hide_menu(self): """Hide the overlay menu.""" self._urwid_loop.widget = self._tabbed_window def get_conv_widget(self, conv_id): """Return an existing or new ConversationWidget.""" if conv_id not in self._conv_widgets: set_title_cb = (lambda widget, title: self._tabbed_window.set_tab(widget, title=title)) widget = ConversationWidget( self._client, self._coroutine_queue, self._conv_list.get(conv_id), set_title_cb, self._keys, self._datetimefmt ) self._conv_widgets[conv_id] = widget return self._conv_widgets[conv_id] def add_conversation_tab(self, conv_id, switch=False): """Add conversation tab if not present, and optionally switch to it.""" conv_widget = self.get_conv_widget(conv_id) self._tabbed_window.set_tab(conv_widget, switch=switch, title=conv_widget.title) def on_select_conversation(self, conv_id): """Called when the user selects a new conversation to listen to.""" # switch to new or existing tab for the conversation self.add_conversation_tab(conv_id, switch=True) async def _on_connect(self): """Handle connecting for the first time.""" self._user_list, self._conv_list = ( await hangups.build_user_conversation_list(self._client) ) self._conv_list.on_event.add_observer(self._on_event) # show the conversation menu conv_picker = ConversationPickerWidget(self._conv_list, self.on_select_conversation, self._keys) self._tabbed_window = TabbedWindowWidget(self._keys) self._tabbed_window.set_tab(conv_picker, switch=True, title='Conversations') self._urwid_loop.widget = self._tabbed_window def _on_event(self, conv_event): """Open conversation tab for new messages & pass events to notifier.""" conv = 
self._conv_list.get(conv_event.conversation_id) user = conv.get_user(conv_event.user_id) show_notification = all(( isinstance(conv_event, hangups.ChatMessageEvent), not user.is_self, not conv.is_quiet, )) if show_notification: self.add_conversation_tab(conv_event.conversation_id) if self._discreet_notifications: notification = DISCREET_NOTIFICATION else: notification = notifier.Notification( user.full_name, get_conv_name(conv), conv_event.text ) self._notifier.send(notification) class CoroutineQueue: """Coroutine queue for the user interface. Urwid executes callback functions for user input rather than coroutines. This creates a problem if we need to execute a coroutine in response to user input. One option is to use asyncio.ensure_future to execute a "fire and forget" coroutine. If we do this, exceptions will be logged instead of propagated, which can obscure problems. This class allows callbacks to place coroutines into a queue, and have them executed by another coroutine. Exceptions will be propagated from the consume method. """ def __init__(self): self._queue = asyncio.Queue() def put(self, coro): """Put a coroutine in the queue to be executed.""" # Avoid logging when a coroutine is queued or executed to avoid log # spam from coroutines that are started on every keypress. assert asyncio.iscoroutine(coro) self._queue.put_nowait(coro) async def consume(self): """Consume coroutines from the queue by executing them.""" while True: coro = await self._queue.get() assert asyncio.iscoroutine(coro) await coro class WidgetBase(urwid.WidgetWrap): """Base for UI Widgets This class overrides the property definition for the method ``keypress`` in ``urwid.WidgetWrap``. Using a method that overrides the property saves many pylint suppressions. 
Args: target: urwid.Widget instance """ def keypress(self, size, key): """forward the call""" # pylint:disable=not-callable, useless-super-delegation return super().keypress(size, key) class LoadingWidget(WidgetBase): """Widget that shows a loading indicator.""" def __init__(self): # show message in the center of the screen super().__init__(urwid.Filler( urwid.Text('Connecting...', align='center') )) class RenameConversationDialog(WidgetBase): """Dialog widget for renaming a conversation.""" def __init__(self, coroutine_queue, conversation, on_cancel, on_save, keybindings): self._coroutine_queue = coroutine_queue self._conversation = conversation edit = urwid.Edit(edit_text=get_conv_name(conversation)) items = [ urwid.Text('Rename conversation:'), edit, urwid.Button( 'Save', on_press=lambda _: self._rename(edit.edit_text, on_save) ), urwid.Button('Cancel', on_press=lambda _: on_cancel()), ] list_walker = urwid.SimpleFocusListWalker(items) list_box = ListBox(keybindings, list_walker) super().__init__(list_box) def _rename(self, name, callback): """Rename conversation and call callback.""" self._coroutine_queue.put(self._conversation.rename(name)) callback() class ConversationMenu(WidgetBase): """Menu for conversation actions.""" def __init__(self, coroutine_queue, conversation, close_callback, keybindings): rename_dialog = RenameConversationDialog( coroutine_queue, conversation, lambda: frame.contents.__setitem__('body', (list_box, None)), close_callback, keybindings ) items = [ urwid.Text( 'Conversation name: {}'.format(get_conv_name(conversation)) ), urwid.Button( 'Change Conversation Name', on_press=lambda _: frame.contents.__setitem__( 'body', (rename_dialog, None) ) ), urwid.Divider('-'), urwid.Button('Back', on_press=lambda _: close_callback()), ] list_walker = urwid.SimpleFocusListWalker(items) list_box = ListBox(keybindings, list_walker) frame = urwid.Frame(list_box) padding = urwid.Padding(frame, left=1, right=1) line_box = urwid.LineBox(padding, 
title='Conversation Menu') super().__init__(line_box) class ConversationButton(WidgetBase): """Button that shows the name and unread message count of conversation.""" def __init__(self, conversation, on_press): conversation.on_event.add_observer(self._on_event) # Need to update on watermark notifications as well since no event is # received when the user marks messages as read. conversation.on_watermark_notification.add_observer(self._on_event) self._conversation = conversation self._button = urwid.Button(self._get_label(), on_press=on_press, user_data=conversation.id_) super().__init__(self._button) def _get_label(self): """Return the button's label generated from the conversation.""" return get_conv_name(self._conversation, show_unread=True) def _on_event(self, _): """Update the button's label when an event occurs.""" self._button.set_label(self._get_label()) @property def last_modified(self): """Last modified date of conversation, used for sorting.""" return self._conversation.last_modified class ConversationListWalker(urwid.SimpleFocusListWalker): """ListWalker that maintains a list of ConversationButtons. ConversationButtons are kept in order of last modified. 
""" # pylint: disable=abstract-method def __init__(self, conversation_list, on_select): self._conversation_list = conversation_list self._conversation_list.on_event.add_observer(self._on_event) self._on_press = lambda button, conv_id: on_select(conv_id) convs = sorted(conversation_list.get_all(), reverse=True, key=lambda c: c.last_modified) buttons = [ConversationButton(conv, on_press=self._on_press) for conv in convs] super().__init__(buttons) def _on_event(self, _): """Re-order the conversations when an event occurs.""" # TODO: handle adding new conversations self.sort(key=lambda conv_button: conv_button.last_modified, reverse=True) class ListBox(WidgetBase): """ListBox widget supporting alternate keybindings.""" def __init__(self, keybindings, list_walker): self._keybindings = keybindings super().__init__(urwid.ListBox(list_walker)) def keypress(self, size, key): # Handle alternate up/down keybindings key = super().keypress(size, key) if key == self._keybindings['down']: super().keypress(size, 'down') elif key == self._keybindings['up']: super().keypress(size, 'up') elif key == self._keybindings['page_up']: super().keypress(size, 'page up') elif key == self._keybindings['page_down']: super().keypress(size, 'page down') else: return key class ConversationPickerWidget(WidgetBase): """ListBox widget for picking a conversation from a list.""" def __init__(self, conversation_list, on_select, keybindings): list_walker = ConversationListWalker(conversation_list, on_select) list_box = ListBox(keybindings, list_walker) widget = urwid.Padding(list_box, left=2, right=2) super().__init__(widget) class ReturnableEdit(urwid.Edit): """Edit widget that clears itself and calls a function on return.""" def __init__(self, on_return, keybindings, caption=None): super().__init__(caption=caption, multiline=True) self._on_return = on_return self._keys = keybindings self._paste_mode = False def keypress(self, size, key): if key == 'begin paste': self._paste_mode = True elif key == 'end 
paste': self._paste_mode = False elif key == 'enter' and not self._paste_mode: self._on_return(self.get_edit_text()) self.set_edit_text('') elif key not in self._keys.values() and key in readlike.keys(): text, pos = readlike.edit(self.edit_text, self.edit_pos, key) self.set_edit_text(text) self.set_edit_pos(pos) else: return super().keypress(size, key) class StatusLineWidget(WidgetBase): """Widget for showing status messages. If the client is disconnected, show a reconnecting message. If a temporary message is showing, show the temporary message. If someone is typing, show a typing messages. """ _MESSAGE_DELAY_SECS = 10 def __init__(self, client, conversation): self._typing_statuses = {} self._conversation = conversation self._conversation.on_event.add_observer(self._on_event) self._conversation.on_typing.add_observer(self._on_typing) self._widget = urwid.Text('', align='center') self._is_connected = True self._message = None self._message_handle = None client.on_disconnect.add_observer(self._on_disconnect) client.on_reconnect.add_observer(self._on_reconnect) super().__init__(urwid.AttrMap(self._widget, 'status_line')) def show_message(self, message_str): """Show a temporary message.""" if self._message_handle is not None: self._message_handle.cancel() self._message_handle = asyncio.get_event_loop().call_later( self._MESSAGE_DELAY_SECS, self._clear_message ) self._message = message_str self._update() def _clear_message(self): """Clear the temporary message.""" self._message = None self._message_handle = None self._update() def _on_disconnect(self): """Show reconnecting message when disconnected.""" self._is_connected = False self._update() def _on_reconnect(self): """Hide reconnecting message when reconnected.""" self._is_connected = True self._update() def _on_event(self, conv_event): """Make users stop typing when they send a message.""" if isinstance(conv_event, hangups.ChatMessageEvent): self._typing_statuses[conv_event.user_id] = ( hangups.TYPING_TYPE_STOPPED 
) self._update() def _on_typing(self, typing_message): """Handle typing updates.""" self._typing_statuses[typing_message.user_id] = typing_message.status self._update() def _update(self): """Update status text.""" typing_users = [self._conversation.get_user(user_id) for user_id, status in self._typing_statuses.items() if status == hangups.TYPING_TYPE_STARTED] displayed_names = [user.first_name for user in typing_users if not user.is_self] if displayed_names: typing_message = '{} {} typing...'.format( ', '.join(sorted(displayed_names)), 'is' if len(displayed_names) == 1 else 'are' ) else: typing_message = '' if not self._is_connected: self._widget.set_text("RECONNECTING...") elif self._message is not None: self._widget.set_text(self._message) else: self._widget.set_text(typing_message) class MessageWidget(WidgetBase): """Widget for displaying a single message in a conversation.""" def __init__(self, timestamp, text, datetimefmt, user=None, show_date=False): # Save the timestamp as an attribute for sorting. self.timestamp = timestamp text = [ ('msg_date', self._get_date_str(timestamp, datetimefmt, show_date=show_date) + ' '), ('msg_text_self' if user is not None and user.is_self else 'msg_text', text) ] if user is not None: text.insert(1, ('msg_self' if user.is_self else 'msg_sender', user.first_name + ': ')) self._widget = urwid.SelectableIcon(text, cursor_position=0) super().__init__(urwid.AttrMap( self._widget, '', { # If the widget is focused, map every other display attribute # to 'msg_selected' so the entire message is highlighted. 
None: 'msg_selected', 'msg_date': 'msg_selected', 'msg_text_self': 'msg_selected', 'msg_text': 'msg_selected', 'msg_self': 'msg_selected', 'msg_sender': 'msg_selected', } )) @staticmethod def _get_date_str(timestamp, datetimefmt, show_date=False): """Convert UTC datetime into user interface string.""" fmt = '' if show_date: fmt += '\n'+datetimefmt.get('date', '')+'\n' fmt += datetimefmt.get('time', '') return timestamp.astimezone(tz=None).strftime(fmt) def __lt__(self, other): return self.timestamp < other.timestamp @staticmethod def from_conversation_event(conversation, conv_event, prev_conv_event, datetimefmt): """Return MessageWidget representing a ConversationEvent. Returns None if the ConversationEvent does not have a widget representation. """ user = conversation.get_user(conv_event.user_id) # Check whether the previous event occurred on the same day as this # event. if prev_conv_event is not None: is_new_day = (conv_event.timestamp.astimezone(tz=None).date() != prev_conv_event.timestamp.astimezone(tz=None).date()) else: is_new_day = False if isinstance(conv_event, hangups.ChatMessageEvent): return MessageWidget(conv_event.timestamp, conv_event.text, datetimefmt, user, show_date=is_new_day) elif isinstance(conv_event, hangups.RenameEvent): if conv_event.new_name == '': text = ('{} cleared the conversation name' .format(user.first_name)) else: text = ('{} renamed the conversation to {}' .format(user.first_name, conv_event.new_name)) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) elif isinstance(conv_event, hangups.MembershipChangeEvent): event_users = [conversation.get_user(user_id) for user_id in conv_event.participant_ids] names = ', '.join([user.full_name for user in event_users]) if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN: text = ('{} added {} to the conversation' .format(user.first_name, names)) else: # LEAVE text = ('{} left the conversation'.format(names)) return MessageWidget(conv_event.timestamp, 
text, datetimefmt, show_date=is_new_day) elif isinstance(conv_event, hangups.HangoutEvent): text = { hangups.HANGOUT_EVENT_TYPE_START: ( 'A Hangout call is starting.' ), hangups.HANGOUT_EVENT_TYPE_END: ( 'A Hangout call ended.' ), hangups.HANGOUT_EVENT_TYPE_ONGOING: ( 'A Hangout call is ongoing.' ), }.get(conv_event.event_type, 'Unknown Hangout call event.') return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent): status_on = hangups.GROUP_LINK_SHARING_STATUS_ON status_text = ('on' if conv_event.new_status == status_on else 'off') text = '{} turned {} joining by link.'.format(user.first_name, status_text) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) else: # conv_event is a generic hangups.ConversationEvent. text = 'Unknown conversation event' return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) class ConversationEventListWalker(urwid.ListWalker): """ListWalker for ConversationEvents. The position may be an event ID or POSITION_LOADING. """ POSITION_LOADING = 'loading' def __init__(self, coroutine_queue, conversation, datetimefmt): self._coroutine_queue = coroutine_queue # CoroutineQueue self._conversation = conversation # Conversation self._is_scrolling = False # Whether the user is trying to scroll up self._is_loading = False # Whether we're currently loading more events self._first_loaded = False # Whether the first event is loaded self._datetimefmt = datetimefmt # Focus position is the first event ID, or POSITION_LOADING. self._focus_position = (conversation.events[-1].id_ if conversation.events else self.POSITION_LOADING) self._conversation.on_event.add_observer(self._handle_event) def _handle_event(self, conv_event): """Handle updating and scrolling when a new event is added. Automatically scroll down to show the new text if the bottom is showing. 
This allows the user to scroll up to read previous messages while new messages are arriving. """ if not self._is_scrolling: self.set_focus(conv_event.id_) else: self._modified() async def _load(self): """Load more events for this conversation.""" try: conv_events = await self._conversation.get_events( self._conversation.events[0].id_ ) except (IndexError, hangups.NetworkError): conv_events = [] if not conv_events: self._first_loaded = True if self._focus_position == self.POSITION_LOADING and conv_events: # If the loading indicator is still focused, and we loaded more # events, set focus on the first new event so the loaded # indicator is replaced. self.set_focus(conv_events[-1].id_) else: # Otherwise, still need to invalidate in case the loading # indicator is showing but not focused. self._modified() self._is_loading = False def __getitem__(self, position): """Return widget at position or raise IndexError.""" if position == self.POSITION_LOADING: if self._first_loaded: # TODO: Show the full date the conversation was created. return urwid.Text('No more messages', align='center') else: # Don't try to load while we're already loading. if not self._is_loading and not self._first_loaded: self._is_loading = True self._coroutine_queue.put(self._load()) return urwid.Text('Loading...', align='center') try: # When creating the widget, also pass the previous event so a # timestamp can be shown if this event occurred on a different day. # Get the previous event, or None if it isn't loaded or doesn't # exist. 
prev_position = self._get_position(position, prev=True) if prev_position == self.POSITION_LOADING: prev_event = None else: prev_event = self._conversation.get_event(prev_position) return MessageWidget.from_conversation_event( self._conversation, self._conversation.get_event(position), prev_event, self._datetimefmt ) except KeyError: raise IndexError('Invalid position: {}'.format(position)) def _get_position(self, position, prev=False): """Return the next/previous position or raise IndexError.""" if position == self.POSITION_LOADING: if prev: raise IndexError('Reached last position') else: return self._conversation.events[0].id_ else: ev = self._conversation.next_event(position, prev=prev) if ev is None: if prev: return self.POSITION_LOADING else: raise IndexError('Reached first position') else: return ev.id_ def next_position(self, position): """Return the position below position or raise IndexError.""" return self._get_position(position) def prev_position(self, position): """Return the position above position or raise IndexError.""" return self._get_position(position, prev=True) def set_focus(self, position): """Set the focus to position or raise IndexError.""" self._focus_position = position self._modified() # If we set focus to anywhere but the last position, the user if # scrolling up: try: self.next_position(position) except IndexError: self._is_scrolling = False else: self._is_scrolling = True def get_focus(self): """Return (widget, position) tuple.""" return (self[self._focus_position], self._focus_position) class ConversationWidget(WidgetBase): """Widget for interacting with a conversation.""" def __init__(self, client, coroutine_queue, conversation, set_title_cb, keybindings, datetimefmt): self._client = client self._coroutine_queue = coroutine_queue self._conversation = conversation self._conversation.on_event.add_observer(self._on_event) self._conversation.on_watermark_notification.add_observer( self._on_watermark_notification ) self._keys = keybindings 
self.title = '' self._set_title_cb = set_title_cb self._set_title() self._list_walker = ConversationEventListWalker( coroutine_queue, conversation, datetimefmt ) self._list_box = ListBox(keybindings, self._list_walker) self._status_widget = StatusLineWidget(client, conversation) self._widget = urwid.Pile([ ('weight', 1, self._list_box), ('pack', self._status_widget), ('pack', ReturnableEdit(self._on_return, keybindings, caption='Send message: ')), ]) # focus the edit widget by default self._widget.focus_position = 2 # Display any old ConversationEvents already attached to the # conversation. for event in self._conversation.events: self._on_event(event) super().__init__(self._widget) def get_menu_widget(self, close_callback): """Return the menu widget associated with this widget.""" return ConversationMenu( self._coroutine_queue, self._conversation, close_callback, self._keys ) def keypress(self, size, key): """Handle marking messages as read and keeping client active.""" # Set the client as active. self._coroutine_queue.put(self._client.set_active()) # Mark the newest event as read. self._coroutine_queue.put(self._conversation.update_read_timestamp()) return super().keypress(size, key) def _set_title(self): """Update this conversation's tab title.""" self.title = get_conv_name(self._conversation, show_unread=True, truncate=True) self._set_title_cb(self, self.title) def _on_return(self, text): """Called when the user presses return on the send message widget.""" # Ignore if the user hasn't typed a message. 
if not text: return elif text.startswith('/image') and len(text.split(' ')) == 2: # Temporary UI for testing image uploads filename = text.split(' ')[1] image_file = open(filename, 'rb') text = '' else: image_file = None text = replace_emoticons(text) segments = hangups.ChatMessageSegment.from_str(text) self._coroutine_queue.put( self._handle_send_message( self._conversation.send_message( segments, image_file=image_file ) ) ) async def _handle_send_message(self, coro): """Handle showing an error if a message fails to send.""" try: await coro except hangups.NetworkError: self._status_widget.show_message('Failed to send message') def _on_watermark_notification(self, _): """Handle watermark changes for this conversation.""" # Update the unread count in the title. self._set_title() def _on_event(self, _): """Display a new conversation message.""" # Update the title in case unread count or conversation name changed. self._set_title() class TabbedWindowWidget(WidgetBase): """A widget that displays a list of widgets via a tab bar.""" def __init__(self, keybindings): self._widgets = [] # [urwid.Widget] self._widget_title = {} # {urwid.Widget: str} self._tab_index = None # int self._keys = keybindings self._tabs = urwid.Text('') self._frame = urwid.Frame(None) super().__init__(urwid.Pile([ ('pack', urwid.AttrMap(self._tabs, 'tab_background')), ('weight', 1, self._frame), ])) def get_current_widget(self): """Return the widget in the current tab.""" return self._widgets[self._tab_index] def _update_tabs(self): """Update tab display.""" text = [] for num, widget in enumerate(self._widgets): palette = ('active_tab' if num == self._tab_index else 'inactive_tab') text += [ (palette, ' {} '.format(self._widget_title[widget])), ('tab_background', ' '), ] self._tabs.set_text(text) self._frame.contents['body'] = (self._widgets[self._tab_index], None) def keypress(self, size, key): """Handle keypresses for changing tabs.""" key = super().keypress(size, key) num_tabs = 
len(self._widgets) if key == self._keys['prev_tab']: self._tab_index = (self._tab_index - 1) % num_tabs self._update_tabs() elif key == self._keys['next_tab']: self._tab_index = (self._tab_index + 1) % num_tabs self._update_tabs() elif key == self._keys['close_tab']: # Don't allow closing the Conversations tab if self._tab_index > 0: curr_tab = self._widgets[self._tab_index] self._widgets.remove(curr_tab) del self._widget_title[curr_tab] self._tab_index -= 1 self._update_tabs() else: return key def set_tab(self, widget, switch=False, title=None): """Add or modify a tab. If widget is not a tab, it will be added. If switch is True, switch to this tab. If title is given, set the tab's title. """ if widget not in self._widgets: self._widgets.append(widget) self._widget_title[widget] = '' if switch: self._tab_index = self._widgets.index(widget) if title: self._widget_title[widget] = title self._update_tabs() def set_terminal_title(title): """Use an xterm escape sequence to set the terminal title.""" sys.stdout.write("\x1b]2;{}\x07".format(title)) @contextlib.contextmanager def bracketed_paste_mode(): """Context manager for enabling/disabling bracketed paste mode.""" sys.stdout.write('\x1b[?2004h') try: yield finally: sys.stdout.write('\x1b[?2004l') def dir_maker(path): """Create a directory if it does not exist.""" directory = os.path.dirname(path) if directory != '' and not os.path.isdir(directory): try: os.makedirs(directory) except OSError as e: sys.exit('Failed to create directory: {}'.format(e)) NOTIFIER_TYPES = { 'none': notifier.Notifier, 'default': notifier.DefaultNotifier, 'bell': notifier.BellNotifier, 'dbus': notifier.DbusNotifier, 'apple': notifier.AppleNotifier, } def get_notifier(notification_type, disable_notifications): if disable_notifications: return notifier.Notifier() else: return NOTIFIER_TYPES[notification_type]() def main(): """Main entry point.""" # Build default paths for files. 
dirs = appdirs.AppDirs('hangups', 'hangups') default_log_path = os.path.join(dirs.user_log_dir, 'hangups.log') default_token_path = os.path.join(dirs.user_cache_dir, 'refresh_token.txt') default_config_path = 'hangups.conf' user_config_path = os.path.join(dirs.user_config_dir, 'hangups.conf') # Create a default empty config file if does not exist. dir_maker(user_config_path) if not os.path.isfile(user_config_path): with open(user_config_path, 'a') as cfg: cfg.write("") parser = configargparse.ArgumentParser( prog='hangups', default_config_files=[default_config_path, user_config_path], formatter_class=configargparse.ArgumentDefaultsHelpFormatter, add_help=False, # Disable help so we can add it to the correct group. ) general_group = parser.add_argument_group('General') general_group.add('-h', '--help', action='help', help='show this help message and exit') general_group.add('--token-path', default=default_token_path, help='path used to store OAuth refresh token') general_group.add('--date-format', default='< %y-%m-%d >', help='date format string') general_group.add('--time-format', default='(%I:%M:%S %p)', help='time format string') general_group.add('-c', '--config', help='configuration file path', is_config_file=True, default=user_config_path) general_group.add('-v', '--version', action='version', version='hangups {}'.format(hangups.__version__)) general_group.add('-d', '--debug', action='store_true', help='log detailed debugging messages') general_group.add('--manual-login', action='store_true', help='enable manual login method using browser') general_group.add('--log', default=default_log_path, help='log file path') key_group = parser.add_argument_group('Keybindings') key_group.add('--key-next-tab', default='ctrl d', help='keybinding for next tab') key_group.add('--key-prev-tab', default='ctrl u', help='keybinding for previous tab') key_group.add('--key-close-tab', default='ctrl w', help='keybinding for close tab') key_group.add('--key-quit', default='ctrl e', 
help='keybinding for quitting') key_group.add('--key-menu', default='ctrl n', help='keybinding for context menu') key_group.add('--key-up', default='k', help='keybinding for alternate up key') key_group.add('--key-down', default='j', help='keybinding for alternate down key') key_group.add('--key-page-up', default='ctrl b', help='keybinding for alternate page up') key_group.add('--key-page-down', default='ctrl f', help='keybinding for alternate page down') notification_group = parser.add_argument_group('Notifications') # deprecated in favor of --notification-type=none: notification_group.add('-n', '--disable-notifications', action='store_true', help=configargparse.SUPPRESS) notification_group.add('-D', '--discreet-notifications', action='store_true', help='hide message details in notifications') notification_group.add('--notification-type', choices=sorted(NOTIFIER_TYPES.keys()), default='default', help='type of notifications to create') # add color scheme options col_group = parser.add_argument_group('Colors') col_group.add('--col-scheme', choices=COL_SCHEMES.keys(), default='default', help='colour scheme to use') col_group.add('--col-palette-colors', choices=('16', '88', '256'), default=16, help='Amount of available colors') for name in COL_SCHEME_NAMES: col_group.add('--col-' + name.replace('_', '-') + '-fg', help=name + ' foreground color') col_group.add('--col-' + name.replace('_', '-') + '-bg', help=name + ' background color') args = parser.parse_args() # Create all necessary directories. 
for path in [args.log, args.token_path]: dir_maker(path) logging.basicConfig(filename=args.log, level=logging.DEBUG if args.debug else logging.WARNING, format=LOG_FORMAT) # urwid makes asyncio's debugging logs VERY noisy, so adjust the log level: logging.getLogger('asyncio').setLevel(logging.WARNING) datetimefmt = {'date': args.date_format, 'time': args.time_format} # setup color scheme palette_colors = int(args.col_palette_colors) col_scheme = COL_SCHEMES[args.col_scheme] for name in COL_SCHEME_NAMES: col_scheme = add_color_to_scheme(col_scheme, name, getattr(args, 'col_' + name + '_fg'), getattr(args, 'col_' + name + '_bg'), palette_colors) keybindings = { 'next_tab': args.key_next_tab, 'prev_tab': args.key_prev_tab, 'close_tab': args.key_close_tab, 'quit': args.key_quit, 'menu': args.key_menu, 'up': args.key_up, 'down': args.key_down, 'page_up': args.key_page_up, 'page_down': args.key_page_down, } notifier_ = get_notifier( args.notification_type, args.disable_notifications ) if(args.manual_login): hangups.auth.get_auth_manual(args.token_path) sys.exit(1) try: ChatUI( args.token_path, keybindings, col_scheme, palette_colors, datetimefmt, notifier_, args.discreet_notifications ) except KeyboardInterrupt: sys.exit('Caught KeyboardInterrupt, exiting abnormally') if __name__ == '__main__': main()
39.094372
79
0.607477
import appdirs import asyncio import configargparse import contextlib import logging import os import sys import urwid import readlike import hangups from hangups.ui.emoticon import replace_emoticons from hangups.ui import notifier from hangups.ui.utils import get_conv_name, add_color_to_scheme rwid.__version__ == '1.2.2-dev': sys.exit('error: hangups-urwid package is installed\n\n' 'Please uninstall hangups-urwid and urwid, and reinstall ' 'hangups.') LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' COL_SCHEMES = { 'default': { ('active_tab', '', ''), ('inactive_tab', 'standout', ''), ('msg_date', '', ''), ('msg_sender', '', ''), ('msg_self', '', ''), ('msg_text', '', ''), ('msg_text_self', '', ''), ('msg_selected', 'standout', ''), ('status_line', 'standout', ''), ('tab_background', 'standout', ''), }, 'solarized-dark': { ('active_tab', 'light gray', 'light blue'), ('inactive_tab', 'underline', 'light green'), ('msg_date', 'dark cyan', ''), ('msg_sender', 'dark blue', ''), ('msg_text_self', '', ''), ('msg_self', 'dark green', ''), ('msg_text', '', ''), ('msg_selected', 'standout', ''), ('status_line', 'standout', ''), ('tab_background', 'black,standout,underline', 'light green'), }, } COL_SCHEME_NAMES = ( 'active_tab', 'inactive_tab', 'msg_date', 'msg_sender', 'msg_self', 'msg_text', 'msg_text_self', 'status_line', 'tab_background' ) DISCREET_NOTIFICATION = notifier.Notification( 'hangups', 'Conversation', 'New message' ) class HangupsDisconnected(Exception): class ChatUI(object): def __init__(self, refresh_token_path, keybindings, palette, palette_colors, datetimefmt, notifier_, discreet_notifications): self._keys = keybindings self._datetimefmt = datetimefmt self._notifier = notifier_ self._discreet_notifications = discreet_notifications set_terminal_title('hangups') self._conv_widgets = {} # {conversation_id: ConversationWidget} self._tabbed_window = None # TabbedWindowWidget self._conv_list = None # hangups.ConversationList self._user_list 
= None # hangups.UserList self._coroutine_queue = CoroutineQueue() self._exception = None # TODO Add urwid widget for getting auth. try: cookies = hangups.auth.get_auth_stdin(refresh_token_path) except hangups.GoogleAuthError as e: sys.exit('Login failed ({})'.format(e)) self._client = hangups.Client(cookies) self._client.on_connect.add_observer(self._on_connect) loop = asyncio.get_event_loop() loop.set_exception_handler(self._exception_handler) try: self._urwid_loop = urwid.MainLoop( LoadingWidget(), palette, handle_mouse=False, input_filter=self._input_filter, event_loop=urwid.AsyncioEventLoop(loop=loop) ) except urwid.AttrSpecError as e: # Fail gracefully for invalid colour options. sys.exit(e) self._urwid_loop.screen.set_terminal_properties(colors=palette_colors) self._urwid_loop.start() coros = [self._connect(), self._coroutine_queue.consume()] # Enable bracketed paste mode after the terminal has been switched to # the alternate screen (after MainLoop.start() to work around bug # 729533 in VTE. with bracketed_paste_mode(): try: # Run all the coros, until they all complete or one raises an # exception. In the normal case, HangupsDisconnected will be # raised. loop.run_until_complete(asyncio.gather(*coros)) except HangupsDisconnected: pass finally: # Clean up urwid. self._urwid_loop.stop() # Cancel all of the coros, and wait for them to shut down. task = asyncio.gather(*coros, return_exceptions=True) task.cancel() try: loop.run_until_complete(task) except asyncio.CancelledError: # In Python 3.7, asyncio.gather no longer swallows # CancelledError, so we need to ignore it. pass loop.close() # If an exception was stored, raise it now. This is used for exceptions # originating in urwid callbacks. if self._exception: raise self._exception # pylint: disable=raising-bad-type async def _connect(self): await self._client.connect() raise HangupsDisconnected() def _exception_handler(self, _loop, context): # Start a graceful shutdown. 
self._coroutine_queue.put(self._client.disconnect()) # Store the exception to be re-raised later. If the context doesn't default_exception = Exception(context.get('message')) self._exception = context.get('exception', default_exception) def _input_filter(self, keys, _): if keys == [self._keys['menu']]: if self._urwid_loop.widget == self._tabbed_window: self._show_menu() else: self._hide_menu() elif keys == [self._keys['quit']]: self._coroutine_queue.put(self._client.disconnect()) else: return keys def _show_menu(self): current_widget = self._tabbed_window.get_current_widget() if hasattr(current_widget, 'get_menu_widget'): menu_widget = current_widget.get_menu_widget(self._hide_menu) overlay = urwid.Overlay(menu_widget, self._tabbed_window, align='center', width=('relative', 80), valign='middle', height=('relative', 80)) self._urwid_loop.widget = overlay def _hide_menu(self): self._urwid_loop.widget = self._tabbed_window def get_conv_widget(self, conv_id): if conv_id not in self._conv_widgets: set_title_cb = (lambda widget, title: self._tabbed_window.set_tab(widget, title=title)) widget = ConversationWidget( self._client, self._coroutine_queue, self._conv_list.get(conv_id), set_title_cb, self._keys, self._datetimefmt ) self._conv_widgets[conv_id] = widget return self._conv_widgets[conv_id] def add_conversation_tab(self, conv_id, switch=False): conv_widget = self.get_conv_widget(conv_id) self._tabbed_window.set_tab(conv_widget, switch=switch, title=conv_widget.title) def on_select_conversation(self, conv_id): self.add_conversation_tab(conv_id, switch=True) async def _on_connect(self): self._user_list, self._conv_list = ( await hangups.build_user_conversation_list(self._client) ) self._conv_list.on_event.add_observer(self._on_event) conv_picker = ConversationPickerWidget(self._conv_list, self.on_select_conversation, self._keys) self._tabbed_window = TabbedWindowWidget(self._keys) self._tabbed_window.set_tab(conv_picker, switch=True, title='Conversations') 
self._urwid_loop.widget = self._tabbed_window def _on_event(self, conv_event): conv = self._conv_list.get(conv_event.conversation_id) user = conv.get_user(conv_event.user_id) show_notification = all(( isinstance(conv_event, hangups.ChatMessageEvent), not user.is_self, not conv.is_quiet, )) if show_notification: self.add_conversation_tab(conv_event.conversation_id) if self._discreet_notifications: notification = DISCREET_NOTIFICATION else: notification = notifier.Notification( user.full_name, get_conv_name(conv), conv_event.text ) self._notifier.send(notification) class CoroutineQueue: def __init__(self): self._queue = asyncio.Queue() def put(self, coro): assert asyncio.iscoroutine(coro) self._queue.put_nowait(coro) async def consume(self): while True: coro = await self._queue.get() assert asyncio.iscoroutine(coro) await coro class WidgetBase(urwid.WidgetWrap): def keypress(self, size, key): return super().keypress(size, key) class LoadingWidget(WidgetBase): def __init__(self): super().__init__(urwid.Filler( urwid.Text('Connecting...', align='center') )) class RenameConversationDialog(WidgetBase): def __init__(self, coroutine_queue, conversation, on_cancel, on_save, keybindings): self._coroutine_queue = coroutine_queue self._conversation = conversation edit = urwid.Edit(edit_text=get_conv_name(conversation)) items = [ urwid.Text('Rename conversation:'), edit, urwid.Button( 'Save', on_press=lambda _: self._rename(edit.edit_text, on_save) ), urwid.Button('Cancel', on_press=lambda _: on_cancel()), ] list_walker = urwid.SimpleFocusListWalker(items) list_box = ListBox(keybindings, list_walker) super().__init__(list_box) def _rename(self, name, callback): self._coroutine_queue.put(self._conversation.rename(name)) callback() class ConversationMenu(WidgetBase): def __init__(self, coroutine_queue, conversation, close_callback, keybindings): rename_dialog = RenameConversationDialog( coroutine_queue, conversation, lambda: frame.contents.__setitem__('body', (list_box, None)), 
close_callback, keybindings ) items = [ urwid.Text( 'Conversation name: {}'.format(get_conv_name(conversation)) ), urwid.Button( 'Change Conversation Name', on_press=lambda _: frame.contents.__setitem__( 'body', (rename_dialog, None) ) ), urwid.Divider('-'), urwid.Button('Back', on_press=lambda _: close_callback()), ] list_walker = urwid.SimpleFocusListWalker(items) list_box = ListBox(keybindings, list_walker) frame = urwid.Frame(list_box) padding = urwid.Padding(frame, left=1, right=1) line_box = urwid.LineBox(padding, title='Conversation Menu') super().__init__(line_box) class ConversationButton(WidgetBase): def __init__(self, conversation, on_press): conversation.on_event.add_observer(self._on_event) conversation.on_watermark_notification.add_observer(self._on_event) self._conversation = conversation self._button = urwid.Button(self._get_label(), on_press=on_press, user_data=conversation.id_) super().__init__(self._button) def _get_label(self): return get_conv_name(self._conversation, show_unread=True) def _on_event(self, _): self._button.set_label(self._get_label()) @property def last_modified(self): return self._conversation.last_modified class ConversationListWalker(urwid.SimpleFocusListWalker): def __init__(self, conversation_list, on_select): self._conversation_list = conversation_list self._conversation_list.on_event.add_observer(self._on_event) self._on_press = lambda button, conv_id: on_select(conv_id) convs = sorted(conversation_list.get_all(), reverse=True, key=lambda c: c.last_modified) buttons = [ConversationButton(conv, on_press=self._on_press) for conv in convs] super().__init__(buttons) def _on_event(self, _): self.sort(key=lambda conv_button: conv_button.last_modified, reverse=True) class ListBox(WidgetBase): def __init__(self, keybindings, list_walker): self._keybindings = keybindings super().__init__(urwid.ListBox(list_walker)) def keypress(self, size, key): key = super().keypress(size, key) if key == self._keybindings['down']: 
super().keypress(size, 'down') elif key == self._keybindings['up']: super().keypress(size, 'up') elif key == self._keybindings['page_up']: super().keypress(size, 'page up') elif key == self._keybindings['page_down']: super().keypress(size, 'page down') else: return key class ConversationPickerWidget(WidgetBase): def __init__(self, conversation_list, on_select, keybindings): list_walker = ConversationListWalker(conversation_list, on_select) list_box = ListBox(keybindings, list_walker) widget = urwid.Padding(list_box, left=2, right=2) super().__init__(widget) class ReturnableEdit(urwid.Edit): def __init__(self, on_return, keybindings, caption=None): super().__init__(caption=caption, multiline=True) self._on_return = on_return self._keys = keybindings self._paste_mode = False def keypress(self, size, key): if key == 'begin paste': self._paste_mode = True elif key == 'end paste': self._paste_mode = False elif key == 'enter' and not self._paste_mode: self._on_return(self.get_edit_text()) self.set_edit_text('') elif key not in self._keys.values() and key in readlike.keys(): text, pos = readlike.edit(self.edit_text, self.edit_pos, key) self.set_edit_text(text) self.set_edit_pos(pos) else: return super().keypress(size, key) class StatusLineWidget(WidgetBase): _MESSAGE_DELAY_SECS = 10 def __init__(self, client, conversation): self._typing_statuses = {} self._conversation = conversation self._conversation.on_event.add_observer(self._on_event) self._conversation.on_typing.add_observer(self._on_typing) self._widget = urwid.Text('', align='center') self._is_connected = True self._message = None self._message_handle = None client.on_disconnect.add_observer(self._on_disconnect) client.on_reconnect.add_observer(self._on_reconnect) super().__init__(urwid.AttrMap(self._widget, 'status_line')) def show_message(self, message_str): if self._message_handle is not None: self._message_handle.cancel() self._message_handle = asyncio.get_event_loop().call_later( self._MESSAGE_DELAY_SECS, 
self._clear_message ) self._message = message_str self._update() def _clear_message(self): self._message = None self._message_handle = None self._update() def _on_disconnect(self): self._is_connected = False self._update() def _on_reconnect(self): self._is_connected = True self._update() def _on_event(self, conv_event): if isinstance(conv_event, hangups.ChatMessageEvent): self._typing_statuses[conv_event.user_id] = ( hangups.TYPING_TYPE_STOPPED ) self._update() def _on_typing(self, typing_message): self._typing_statuses[typing_message.user_id] = typing_message.status self._update() def _update(self): typing_users = [self._conversation.get_user(user_id) for user_id, status in self._typing_statuses.items() if status == hangups.TYPING_TYPE_STARTED] displayed_names = [user.first_name for user in typing_users if not user.is_self] if displayed_names: typing_message = '{} {} typing...'.format( ', '.join(sorted(displayed_names)), 'is' if len(displayed_names) == 1 else 'are' ) else: typing_message = '' if not self._is_connected: self._widget.set_text("RECONNECTING...") elif self._message is not None: self._widget.set_text(self._message) else: self._widget.set_text(typing_message) class MessageWidget(WidgetBase): def __init__(self, timestamp, text, datetimefmt, user=None, show_date=False): self.timestamp = timestamp text = [ ('msg_date', self._get_date_str(timestamp, datetimefmt, show_date=show_date) + ' '), ('msg_text_self' if user is not None and user.is_self else 'msg_text', text) ] if user is not None: text.insert(1, ('msg_self' if user.is_self else 'msg_sender', user.first_name + ': ')) self._widget = urwid.SelectableIcon(text, cursor_position=0) super().__init__(urwid.AttrMap( self._widget, '', { None: 'msg_selected', 'msg_date': 'msg_selected', 'msg_text_self': 'msg_selected', 'msg_text': 'msg_selected', 'msg_self': 'msg_selected', 'msg_sender': 'msg_selected', } )) @staticmethod def _get_date_str(timestamp, datetimefmt, show_date=False): fmt = '' if show_date: fmt += 
'\n'+datetimefmt.get('date', '')+'\n' fmt += datetimefmt.get('time', '') return timestamp.astimezone(tz=None).strftime(fmt) def __lt__(self, other): return self.timestamp < other.timestamp @staticmethod def from_conversation_event(conversation, conv_event, prev_conv_event, datetimefmt): user = conversation.get_user(conv_event.user_id) if prev_conv_event is not None: is_new_day = (conv_event.timestamp.astimezone(tz=None).date() != prev_conv_event.timestamp.astimezone(tz=None).date()) else: is_new_day = False if isinstance(conv_event, hangups.ChatMessageEvent): return MessageWidget(conv_event.timestamp, conv_event.text, datetimefmt, user, show_date=is_new_day) elif isinstance(conv_event, hangups.RenameEvent): if conv_event.new_name == '': text = ('{} cleared the conversation name' .format(user.first_name)) else: text = ('{} renamed the conversation to {}' .format(user.first_name, conv_event.new_name)) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) elif isinstance(conv_event, hangups.MembershipChangeEvent): event_users = [conversation.get_user(user_id) for user_id in conv_event.participant_ids] names = ', '.join([user.full_name for user in event_users]) if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN: text = ('{} added {} to the conversation' .format(user.first_name, names)) else: text = ('{} left the conversation'.format(names)) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) elif isinstance(conv_event, hangups.HangoutEvent): text = { hangups.HANGOUT_EVENT_TYPE_START: ( 'A Hangout call is starting.' ), hangups.HANGOUT_EVENT_TYPE_END: ( 'A Hangout call ended.' ), hangups.HANGOUT_EVENT_TYPE_ONGOING: ( 'A Hangout call is ongoing.' 
), }.get(conv_event.event_type, 'Unknown Hangout call event.') return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent): status_on = hangups.GROUP_LINK_SHARING_STATUS_ON status_text = ('on' if conv_event.new_status == status_on else 'off') text = '{} turned {} joining by link.'.format(user.first_name, status_text) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) else: text = 'Unknown conversation event' return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day) class ConversationEventListWalker(urwid.ListWalker): POSITION_LOADING = 'loading' def __init__(self, coroutine_queue, conversation, datetimefmt): self._coroutine_queue = coroutine_queue self._conversation = conversation self._is_scrolling = False self._is_loading = False self._first_loaded = False # Whether the first event is loaded self._datetimefmt = datetimefmt # Focus position is the first event ID, or POSITION_LOADING. self._focus_position = (conversation.events[-1].id_ if conversation.events else self.POSITION_LOADING) self._conversation.on_event.add_observer(self._handle_event) def _handle_event(self, conv_event): if not self._is_scrolling: self.set_focus(conv_event.id_) else: self._modified() async def _load(self): try: conv_events = await self._conversation.get_events( self._conversation.events[0].id_ ) except (IndexError, hangups.NetworkError): conv_events = [] if not conv_events: self._first_loaded = True if self._focus_position == self.POSITION_LOADING and conv_events: # If the loading indicator is still focused, and we loaded more # events, set focus on the first new event so the loaded # indicator is replaced. self.set_focus(conv_events[-1].id_) else: # Otherwise, still need to invalidate in case the loading # indicator is showing but not focused. 
self._modified() self._is_loading = False def __getitem__(self, position): if position == self.POSITION_LOADING: if self._first_loaded: # TODO: Show the full date the conversation was created. return urwid.Text('No more messages', align='center') else: # Don't try to load while we're already loading. if not self._is_loading and not self._first_loaded: self._is_loading = True self._coroutine_queue.put(self._load()) return urwid.Text('Loading...', align='center') try: # When creating the widget, also pass the previous event so a # timestamp can be shown if this event occurred on a different day. # Get the previous event, or None if it isn't loaded or doesn't # exist. prev_position = self._get_position(position, prev=True) if prev_position == self.POSITION_LOADING: prev_event = None else: prev_event = self._conversation.get_event(prev_position) return MessageWidget.from_conversation_event( self._conversation, self._conversation.get_event(position), prev_event, self._datetimefmt ) except KeyError: raise IndexError('Invalid position: {}'.format(position)) def _get_position(self, position, prev=False): if position == self.POSITION_LOADING: if prev: raise IndexError('Reached last position') else: return self._conversation.events[0].id_ else: ev = self._conversation.next_event(position, prev=prev) if ev is None: if prev: return self.POSITION_LOADING else: raise IndexError('Reached first position') else: return ev.id_ def next_position(self, position): return self._get_position(position) def prev_position(self, position): return self._get_position(position, prev=True) def set_focus(self, position): self._focus_position = position self._modified() # If we set focus to anywhere but the last position, the user if # scrolling up: try: self.next_position(position) except IndexError: self._is_scrolling = False else: self._is_scrolling = True def get_focus(self): return (self[self._focus_position], self._focus_position) class ConversationWidget(WidgetBase): def __init__(self, 
client, coroutine_queue, conversation, set_title_cb, keybindings, datetimefmt): self._client = client self._coroutine_queue = coroutine_queue self._conversation = conversation self._conversation.on_event.add_observer(self._on_event) self._conversation.on_watermark_notification.add_observer( self._on_watermark_notification ) self._keys = keybindings self.title = '' self._set_title_cb = set_title_cb self._set_title() self._list_walker = ConversationEventListWalker( coroutine_queue, conversation, datetimefmt ) self._list_box = ListBox(keybindings, self._list_walker) self._status_widget = StatusLineWidget(client, conversation) self._widget = urwid.Pile([ ('weight', 1, self._list_box), ('pack', self._status_widget), ('pack', ReturnableEdit(self._on_return, keybindings, caption='Send message: ')), ]) # focus the edit widget by default self._widget.focus_position = 2 # Display any old ConversationEvents already attached to the # conversation. for event in self._conversation.events: self._on_event(event) super().__init__(self._widget) def get_menu_widget(self, close_callback): return ConversationMenu( self._coroutine_queue, self._conversation, close_callback, self._keys ) def keypress(self, size, key): # Set the client as active. self._coroutine_queue.put(self._client.set_active()) # Mark the newest event as read. self._coroutine_queue.put(self._conversation.update_read_timestamp()) return super().keypress(size, key) def _set_title(self): self.title = get_conv_name(self._conversation, show_unread=True, truncate=True) self._set_title_cb(self, self.title) def _on_return(self, text): # Ignore if the user hasn't typed a message. 
if not text: return elif text.startswith('/image') and len(text.split(' ')) == 2: filename = text.split(' ')[1] image_file = open(filename, 'rb') text = '' else: image_file = None text = replace_emoticons(text) segments = hangups.ChatMessageSegment.from_str(text) self._coroutine_queue.put( self._handle_send_message( self._conversation.send_message( segments, image_file=image_file ) ) ) async def _handle_send_message(self, coro): try: await coro except hangups.NetworkError: self._status_widget.show_message('Failed to send message') def _on_watermark_notification(self, _): self._set_title() def _on_event(self, _): self._set_title() class TabbedWindowWidget(WidgetBase): def __init__(self, keybindings): self._widgets = [] self._widget_title = {} self._tab_index = None self._keys = keybindings self._tabs = urwid.Text('') self._frame = urwid.Frame(None) super().__init__(urwid.Pile([ ('pack', urwid.AttrMap(self._tabs, 'tab_background')), ('weight', 1, self._frame), ])) def get_current_widget(self): return self._widgets[self._tab_index] def _update_tabs(self): text = [] for num, widget in enumerate(self._widgets): palette = ('active_tab' if num == self._tab_index else 'inactive_tab') text += [ (palette, ' {} '.format(self._widget_title[widget])), ('tab_background', ' '), ] self._tabs.set_text(text) self._frame.contents['body'] = (self._widgets[self._tab_index], None) def keypress(self, size, key): key = super().keypress(size, key) num_tabs = len(self._widgets) if key == self._keys['prev_tab']: self._tab_index = (self._tab_index - 1) % num_tabs self._update_tabs() elif key == self._keys['next_tab']: self._tab_index = (self._tab_index + 1) % num_tabs self._update_tabs() elif key == self._keys['close_tab']: if self._tab_index > 0: curr_tab = self._widgets[self._tab_index] self._widgets.remove(curr_tab) del self._widget_title[curr_tab] self._tab_index -= 1 self._update_tabs() else: return key def set_tab(self, widget, switch=False, title=None): if widget not in self._widgets: 
self._widgets.append(widget) self._widget_title[widget] = '' if switch: self._tab_index = self._widgets.index(widget) if title: self._widget_title[widget] = title self._update_tabs() def set_terminal_title(title): sys.stdout.write("\x1b]2;{}\x07".format(title)) @contextlib.contextmanager def bracketed_paste_mode(): sys.stdout.write('\x1b[?2004h') try: yield finally: sys.stdout.write('\x1b[?2004l') def dir_maker(path): directory = os.path.dirname(path) if directory != '' and not os.path.isdir(directory): try: os.makedirs(directory) except OSError as e: sys.exit('Failed to create directory: {}'.format(e)) NOTIFIER_TYPES = { 'none': notifier.Notifier, 'default': notifier.DefaultNotifier, 'bell': notifier.BellNotifier, 'dbus': notifier.DbusNotifier, 'apple': notifier.AppleNotifier, } def get_notifier(notification_type, disable_notifications): if disable_notifications: return notifier.Notifier() else: return NOTIFIER_TYPES[notification_type]() def main(): # Build default paths for files. dirs = appdirs.AppDirs('hangups', 'hangups') default_log_path = os.path.join(dirs.user_log_dir, 'hangups.log') default_token_path = os.path.join(dirs.user_cache_dir, 'refresh_token.txt') default_config_path = 'hangups.conf' user_config_path = os.path.join(dirs.user_config_dir, 'hangups.conf') # Create a default empty config file if does not exist. dir_maker(user_config_path) if not os.path.isfile(user_config_path): with open(user_config_path, 'a') as cfg: cfg.write("") parser = configargparse.ArgumentParser( prog='hangups', default_config_files=[default_config_path, user_config_path], formatter_class=configargparse.ArgumentDefaultsHelpFormatter, add_help=False, # Disable help so we can add it to the correct group. 
) general_group = parser.add_argument_group('General') general_group.add('-h', '--help', action='help', help='show this help message and exit') general_group.add('--token-path', default=default_token_path, help='path used to store OAuth refresh token') general_group.add('--date-format', default='< %y-%m-%d >', help='date format string') general_group.add('--time-format', default='(%I:%M:%S %p)', help='time format string') general_group.add('-c', '--config', help='configuration file path', is_config_file=True, default=user_config_path) general_group.add('-v', '--version', action='version', version='hangups {}'.format(hangups.__version__)) general_group.add('-d', '--debug', action='store_true', help='log detailed debugging messages') general_group.add('--manual-login', action='store_true', help='enable manual login method using browser') general_group.add('--log', default=default_log_path, help='log file path') key_group = parser.add_argument_group('Keybindings') key_group.add('--key-next-tab', default='ctrl d', help='keybinding for next tab') key_group.add('--key-prev-tab', default='ctrl u', help='keybinding for previous tab') key_group.add('--key-close-tab', default='ctrl w', help='keybinding for close tab') key_group.add('--key-quit', default='ctrl e', help='keybinding for quitting') key_group.add('--key-menu', default='ctrl n', help='keybinding for context menu') key_group.add('--key-up', default='k', help='keybinding for alternate up key') key_group.add('--key-down', default='j', help='keybinding for alternate down key') key_group.add('--key-page-up', default='ctrl b', help='keybinding for alternate page up') key_group.add('--key-page-down', default='ctrl f', help='keybinding for alternate page down') notification_group = parser.add_argument_group('Notifications') # deprecated in favor of --notification-type=none: notification_group.add('-n', '--disable-notifications', action='store_true', help=configargparse.SUPPRESS) notification_group.add('-D', 
'--discreet-notifications', action='store_true', help='hide message details in notifications') notification_group.add('--notification-type', choices=sorted(NOTIFIER_TYPES.keys()), default='default', help='type of notifications to create') # add color scheme options col_group = parser.add_argument_group('Colors') col_group.add('--col-scheme', choices=COL_SCHEMES.keys(), default='default', help='colour scheme to use') col_group.add('--col-palette-colors', choices=('16', '88', '256'), default=16, help='Amount of available colors') for name in COL_SCHEME_NAMES: col_group.add('--col-' + name.replace('_', '-') + '-fg', help=name + ' foreground color') col_group.add('--col-' + name.replace('_', '-') + '-bg', help=name + ' background color') args = parser.parse_args() # Create all necessary directories. for path in [args.log, args.token_path]: dir_maker(path) logging.basicConfig(filename=args.log, level=logging.DEBUG if args.debug else logging.WARNING, format=LOG_FORMAT) # urwid makes asyncio's debugging logs VERY noisy, so adjust the log level: logging.getLogger('asyncio').setLevel(logging.WARNING) datetimefmt = {'date': args.date_format, 'time': args.time_format} palette_colors = int(args.col_palette_colors) col_scheme = COL_SCHEMES[args.col_scheme] for name in COL_SCHEME_NAMES: col_scheme = add_color_to_scheme(col_scheme, name, getattr(args, 'col_' + name + '_fg'), getattr(args, 'col_' + name + '_bg'), palette_colors) keybindings = { 'next_tab': args.key_next_tab, 'prev_tab': args.key_prev_tab, 'close_tab': args.key_close_tab, 'quit': args.key_quit, 'menu': args.key_menu, 'up': args.key_up, 'down': args.key_down, 'page_up': args.key_page_up, 'page_down': args.key_page_down, } notifier_ = get_notifier( args.notification_type, args.disable_notifications ) if(args.manual_login): hangups.auth.get_auth_manual(args.token_path) sys.exit(1) try: ChatUI( args.token_path, keybindings, col_scheme, palette_colors, datetimefmt, notifier_, args.discreet_notifications ) except 
KeyboardInterrupt: sys.exit('Caught KeyboardInterrupt, exiting abnormally') if __name__ == '__main__': main()
true
true
1c45fd021187544fbd3336d5d553e7bcdc31d6de
2,477
py
Python
ANPR.py
itcthienkhiem/myANPR
e0a76b2165d539c6a38f51f7485f37349a85a074
[ "Apache-2.0" ]
null
null
null
ANPR.py
itcthienkhiem/myANPR
e0a76b2165d539c6a38f51f7485f37349a85a074
[ "Apache-2.0" ]
null
null
null
ANPR.py
itcthienkhiem/myANPR
e0a76b2165d539c6a38f51f7485f37349a85a074
[ "Apache-2.0" ]
null
null
null
try: import cv2 except ImportError: print ("You must have OpenCV installed") import matplotlib.pyplot as plt import numpy as np #Image(filename='../../../data/ANPR/sample_plates.png') def showfig(image, ucmap): #There is a difference in pixel ordering in OpenCV and Matplotlib. #OpenCV follows BGR order, while matplotlib follows RGB order. if len(image.shape)==3 : b,g,r = cv2.split(image) # get b,g,r image = cv2.merge([r,g,b]) # switch it to rgb imgplot=plt.imshow(image, ucmap) imgplot.axes.get_xaxis().set_visible(False) imgplot.axes.get_yaxis().set_visible(False) plt.show() plt.rcParams['figure.figsize'] = 10, 10 plt.title('Sample Car') image_path="out.jpg" carsample=cv2.imread(image_path) showfig(carsample,None) plt.rcParams['figure.figsize'] = 7,7 # convert into grayscale gray_carsample=cv2.cvtColor(carsample, cv2.COLOR_BGR2GRAY) showfig(gray_carsample, plt.get_cmap('gray')) # blur the image blur=cv2.GaussianBlur(gray_carsample,(5,5),0) showfig(blur, plt.get_cmap('gray')) # find the sobel gradient. 
use the kernel size to be 3 sobelx=cv2.Sobel(blur, cv2.CV_8U, 1, 0, ksize=3) showfig(sobelx, plt.get_cmap('gray')) #Otsu thresholding _,th2=cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) showfig(th2, plt.get_cmap('gray')) #Morphological Closing se=cv2.getStructuringElement(cv2.MORPH_RECT,(23,2)) closing=cv2.morphologyEx(th2, cv2.MORPH_CLOSE, se) showfig(closing, plt.get_cmap('gray')) _,contours,_=cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) for cnt in contours: rect=cv2.minAreaRect(cnt) box=cv2.boxPoints(rect) box=np.int0(box) cv2.drawContours(carsample, [box], 0, (0,255,0),2) showfig(carsample, None) def validate(cnt): rect=cv2.minAreaRect(cnt) box=cv2.boxPoints(rect) box=np.int0(box) output=False width=rect[1][0] height=rect[1][1] if ((width!=0) & (height!=0)): if (((height/width>2) & (height>width)) | ((width/height>2) & (width>height))): if((height*width<16000) & (height*width>1000)): output=True return output #Lets draw validated contours with red. for cnt in contours: if validate(cnt): rect=cv2.minAreaRect(cnt) box=cv2.boxPoints(rect) box=np.int0(box) cv2.drawContours(carsample, [box], 0, (0,0,255),2) showfig(carsample, None)
31.75641
88
0.669762
try: import cv2 except ImportError: print ("You must have OpenCV installed") import matplotlib.pyplot as plt import numpy as np def showfig(image, ucmap): if len(image.shape)==3 : b,g,r = cv2.split(image) image = cv2.merge([r,g,b]) imgplot=plt.imshow(image, ucmap) imgplot.axes.get_xaxis().set_visible(False) imgplot.axes.get_yaxis().set_visible(False) plt.show() plt.rcParams['figure.figsize'] = 10, 10 plt.title('Sample Car') image_path="out.jpg" carsample=cv2.imread(image_path) showfig(carsample,None) plt.rcParams['figure.figsize'] = 7,7 gray_carsample=cv2.cvtColor(carsample, cv2.COLOR_BGR2GRAY) showfig(gray_carsample, plt.get_cmap('gray')) blur=cv2.GaussianBlur(gray_carsample,(5,5),0) showfig(blur, plt.get_cmap('gray')) sobelx=cv2.Sobel(blur, cv2.CV_8U, 1, 0, ksize=3) showfig(sobelx, plt.get_cmap('gray')) _,th2=cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) showfig(th2, plt.get_cmap('gray')) se=cv2.getStructuringElement(cv2.MORPH_RECT,(23,2)) closing=cv2.morphologyEx(th2, cv2.MORPH_CLOSE, se) showfig(closing, plt.get_cmap('gray')) _,contours,_=cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) for cnt in contours: rect=cv2.minAreaRect(cnt) box=cv2.boxPoints(rect) box=np.int0(box) cv2.drawContours(carsample, [box], 0, (0,255,0),2) showfig(carsample, None) def validate(cnt): rect=cv2.minAreaRect(cnt) box=cv2.boxPoints(rect) box=np.int0(box) output=False width=rect[1][0] height=rect[1][1] if ((width!=0) & (height!=0)): if (((height/width>2) & (height>width)) | ((width/height>2) & (width>height))): if((height*width<16000) & (height*width>1000)): output=True return output for cnt in contours: if validate(cnt): rect=cv2.minAreaRect(cnt) box=cv2.boxPoints(rect) box=np.int0(box) cv2.drawContours(carsample, [box], 0, (0,0,255),2) showfig(carsample, None)
true
true
1c45fe0df1db49b180b71b556b709af501dbc80c
86
py
Python
tests/profiling/wrong_program_gevent.py
mykytarudenko/new-project
e06a912382239739dd3f93b54d545b9506102372
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
tests/profiling/wrong_program_gevent.py
mykytarudenko/new-project
e06a912382239739dd3f93b54d545b9506102372
[ "Apache-2.0", "BSD-3-Clause" ]
1
2021-01-27T04:53:24.000Z
2021-01-27T04:53:24.000Z
tests/profiling/wrong_program_gevent.py
mykytarudenko/new-project
e06a912382239739dd3f93b54d545b9506102372
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
from gevent import monkey import ddtrace.profiling.auto # noqa monkey.patch_all()
12.285714
37
0.77907
from gevent import monkey import ddtrace.profiling.auto monkey.patch_all()
true
true
1c45fe43ea64a09d9242eb97f7af409854244804
10,694
py
Python
readers.py
ankitshah009/youtube-8m-1
a0f28c9ca05b72ca709322f2c4871a4345a69fbb
[ "Apache-2.0" ]
2
2018-09-15T04:14:28.000Z
2019-02-14T02:35:55.000Z
readers.py
ankitshah009/youtube-8m-1
a0f28c9ca05b72ca709322f2c4871a4345a69fbb
[ "Apache-2.0" ]
null
null
null
readers.py
ankitshah009/youtube-8m-1
a0f28c9ca05b72ca709322f2c4871a4345a69fbb
[ "Apache-2.0" ]
null
null
null
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides readers configured for different datasets.""" import tensorflow as tf try: # relative imports on gcloud (as a module) from . import utils except ImportError: # relative imports locally (as a script) import utils from tensorflow import logging def resize_axis(tensor, axis, new_size, fill_value=0): """Truncates or pads a tensor to new_size on on a given axis. Truncate or extend tensor such that tensor.shape[axis] == new_size. If the size increases, the padding will be performed at the end, using fill_value. Args: tensor: The tensor to be resized. axis: An integer representing the dimension to be sliced. new_size: An integer or 0d tensor representing the new value for tensor.shape[axis]. fill_value: Value to use to fill any new entries in the tensor. Will be cast to the type of tensor. Returns: The resized tensor. """ tensor = tf.convert_to_tensor(tensor) shape = tf.unstack(tf.shape(tensor)) pad_shape = shape[:] pad_shape[axis] = tf.maximum(0, new_size - shape[axis]) shape[axis] = tf.minimum(shape[axis], new_size) shape = tf.stack(shape) resized = tf.concat([ tf.slice(tensor, tf.zeros_like(shape), shape), tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype)) ], axis) # Update shape. new_shape = tensor.get_shape().as_list() # A copy is being made. 
new_shape[axis] = new_size resized.set_shape(new_shape) return resized class BaseReader(object): """Inherit from this class when implementing new readers.""" def prepare_reader(self, unused_filename_queue): """Create a thread for generating prediction and label tensors.""" raise NotImplementedError() class YT8MAggregatedFeatureReader(BaseReader): """Reads TFRecords of pre-aggregated Examples. The TFRecords must contain Examples with a sparse int64 'labels' feature and a fixed length float32 feature, obtained from the features in 'feature_name'. The float features are assumed to be an average of dequantized values. """ def __init__(self, num_classes=3862, feature_sizes=[1024, 128], feature_names=["mean_rgb", "mean_audio"]): """Construct a YT8MAggregatedFeatureReader. Args: num_classes: a positive integer for the number of classes. feature_sizes: positive integer(s) for the feature dimensions as a list. feature_names: the feature name(s) in the tensorflow record as a list. """ assert len(feature_names) == len(feature_sizes), \ "length of feature_names (={}) != length of feature_sizes (={})".format( \ len(feature_names), len(feature_sizes)) self.num_classes = num_classes self.feature_sizes = feature_sizes self.feature_names = feature_names def prepare_reader(self, filename_queue, batch_size=1024): """Creates a single reader thread for pre-aggregated YouTube 8M Examples. Args: filename_queue: A tensorflow queue of filename locations. Returns: A tuple of video indexes, features, labels, and padding data. """ reader = tf.TFRecordReader() _, serialized_examples = reader.read_up_to(filename_queue, batch_size) tf.add_to_collection("serialized_examples", serialized_examples) return self.prepare_serialized_examples(serialized_examples) def prepare_serialized_examples(self, serialized_examples): # set the mapping from the fields to data types in the proto num_features = len(self.feature_names) assert num_features > 0, "self.feature_names is empty!" 
assert len(self.feature_names) == len(self.feature_sizes), \ "length of feature_names (={}) != length of feature_sizes (={})".format( \ len(self.feature_names), len(self.feature_sizes)) feature_map = {"id": tf.FixedLenFeature([], tf.string), "labels": tf.VarLenFeature(tf.int64)} for feature_index in range(num_features): feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature( [self.feature_sizes[feature_index]], tf.float32) features = tf.parse_example(serialized_examples, features=feature_map) labels = tf.sparse_to_indicator(features["labels"], self.num_classes) labels.set_shape([None, self.num_classes]) concatenated_features = tf.concat([ features[feature_name] for feature_name in self.feature_names], 1) return features["id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]]) class YT8MFrameFeatureReader(BaseReader): """Reads TFRecords of SequenceExamples. The TFRecords must contain SequenceExamples with the sparse in64 'labels' context feature and a fixed length byte-quantized feature vector, obtained from the features in 'feature_names'. The quantized features will be mapped back into a range between min_quantized_value and max_quantized_value. """ def __init__(self, num_classes=3862, feature_sizes=[1024, 128], feature_names=["rgb", "audio"], max_frames=300, float16_flag=False): """Construct a YT8MFrameFeatureReader. Args: num_classes: a positive integer for the number of classes. feature_sizes: positive integer(s) for the feature dimensions as a list. feature_names: the feature name(s) in the tensorflow record as a list. max_frames: the maximum number of frames to process. 
""" assert len(feature_names) == len(feature_sizes), \ "length of feature_names (={}) != length of feature_sizes (={})".format( \ len(feature_names), len(feature_sizes)) self.num_classes = num_classes self.feature_sizes = feature_sizes self.feature_names = feature_names self.max_frames = max_frames self.float16_flag = float16_flag def get_video_matrix(self, features, feature_size, max_frames, max_quantized_value, min_quantized_value): """Decodes features from an input string and quantizes it. Args: features: raw feature values feature_size: length of each frame feature vector max_frames: number of frames (rows) in the output feature_matrix max_quantized_value: the maximum of the quantized value. min_quantized_value: the minimum of the quantized value. Returns: feature_matrix: matrix of all frame-features num_frames: number of frames in the sequence """ dtype = tf.float16 if self.float16_flag else tf.float32 decoded_features = tf.reshape( tf.cast(tf.decode_raw(features, tf.uint8), dtype), [-1, feature_size]) num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames) feature_matrix = utils.Dequantize(decoded_features, max_quantized_value, min_quantized_value) feature_matrix = resize_axis(feature_matrix, 0, max_frames) return feature_matrix, num_frames def prepare_reader(self, filename_queue, max_quantized_value=2, min_quantized_value=-2): """Creates a single reader thread for YouTube8M SequenceExamples. Args: filename_queue: A tensorflow queue of filename locations. max_quantized_value: the maximum of the quantized value. min_quantized_value: the minimum of the quantized value. Returns: A tuple of video indexes, video features, labels, and padding data. 
""" reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) return self.prepare_serialized_examples(serialized_example, max_quantized_value, min_quantized_value) def prepare_serialized_examples(self, serialized_example, max_quantized_value=2, min_quantized_value=-2): contexts, features = tf.parse_single_sequence_example( serialized_example, context_features={"id": tf.FixedLenFeature( [], tf.string), "labels": tf.VarLenFeature(tf.int64)}, sequence_features={ feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string) for feature_name in self.feature_names }) # read ground truth labels labels = (tf.cast( tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1, validate_indices=False), tf.bool)) # loads (potentially) different types of features and concatenates them num_features = len(self.feature_names) assert num_features > 0, "No feature selected: feature_names is empty!" assert len(self.feature_names) == len(self.feature_sizes), \ "length of feature_names (={}) != length of feature_sizes (={})".format( \ len(self.feature_names), len(self.feature_sizes)) num_frames = -1 # the number of frames in the video feature_matrices = [None] * num_features # an array of different features for feature_index in range(num_features): feature_matrix, num_frames_in_this_feature = self.get_video_matrix( features[self.feature_names[feature_index]], self.feature_sizes[feature_index], self.max_frames, max_quantized_value, min_quantized_value) if num_frames == -1: num_frames = num_frames_in_this_feature else: tf.assert_equal(num_frames, num_frames_in_this_feature) feature_matrices[feature_index] = feature_matrix # cap the number of frames at self.max_frames num_frames = tf.minimum(num_frames, self.max_frames) # concatenate different features video_matrix = tf.concat(feature_matrices, 1) # convert to batch format. # TODO: Do proper batch reads to remove the IO bottleneck. 
batch_video_ids = tf.expand_dims(contexts["id"], 0) batch_video_matrix = tf.expand_dims(video_matrix, 0) batch_labels = tf.expand_dims(labels, 0) batch_frames = tf.expand_dims(num_frames, 0) return batch_video_ids, batch_video_matrix, batch_labels, batch_frames
37.921986
101
0.696559
import tensorflow as tf try: from . import utils except ImportError: import utils from tensorflow import logging def resize_axis(tensor, axis, new_size, fill_value=0): tensor = tf.convert_to_tensor(tensor) shape = tf.unstack(tf.shape(tensor)) pad_shape = shape[:] pad_shape[axis] = tf.maximum(0, new_size - shape[axis]) shape[axis] = tf.minimum(shape[axis], new_size) shape = tf.stack(shape) resized = tf.concat([ tf.slice(tensor, tf.zeros_like(shape), shape), tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype)) ], axis) new_shape = tensor.get_shape().as_list() new_shape[axis] = new_size resized.set_shape(new_shape) return resized class BaseReader(object): def prepare_reader(self, unused_filename_queue): raise NotImplementedError() class YT8MAggregatedFeatureReader(BaseReader): def __init__(self, num_classes=3862, feature_sizes=[1024, 128], feature_names=["mean_rgb", "mean_audio"]): assert len(feature_names) == len(feature_sizes), \ "length of feature_names (={}) != length of feature_sizes (={})".format( \ len(feature_names), len(feature_sizes)) self.num_classes = num_classes self.feature_sizes = feature_sizes self.feature_names = feature_names def prepare_reader(self, filename_queue, batch_size=1024): reader = tf.TFRecordReader() _, serialized_examples = reader.read_up_to(filename_queue, batch_size) tf.add_to_collection("serialized_examples", serialized_examples) return self.prepare_serialized_examples(serialized_examples) def prepare_serialized_examples(self, serialized_examples): num_features = len(self.feature_names) assert num_features > 0, "self.feature_names is empty!" 
assert len(self.feature_names) == len(self.feature_sizes), \ "length of feature_names (={}) != length of feature_sizes (={})".format( \ len(self.feature_names), len(self.feature_sizes)) feature_map = {"id": tf.FixedLenFeature([], tf.string), "labels": tf.VarLenFeature(tf.int64)} for feature_index in range(num_features): feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature( [self.feature_sizes[feature_index]], tf.float32) features = tf.parse_example(serialized_examples, features=feature_map) labels = tf.sparse_to_indicator(features["labels"], self.num_classes) labels.set_shape([None, self.num_classes]) concatenated_features = tf.concat([ features[feature_name] for feature_name in self.feature_names], 1) return features["id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]]) class YT8MFrameFeatureReader(BaseReader): def __init__(self, num_classes=3862, feature_sizes=[1024, 128], feature_names=["rgb", "audio"], max_frames=300, float16_flag=False): assert len(feature_names) == len(feature_sizes), \ "length of feature_names (={}) != length of feature_sizes (={})".format( \ len(feature_names), len(feature_sizes)) self.num_classes = num_classes self.feature_sizes = feature_sizes self.feature_names = feature_names self.max_frames = max_frames self.float16_flag = float16_flag def get_video_matrix(self, features, feature_size, max_frames, max_quantized_value, min_quantized_value): dtype = tf.float16 if self.float16_flag else tf.float32 decoded_features = tf.reshape( tf.cast(tf.decode_raw(features, tf.uint8), dtype), [-1, feature_size]) num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames) feature_matrix = utils.Dequantize(decoded_features, max_quantized_value, min_quantized_value) feature_matrix = resize_axis(feature_matrix, 0, max_frames) return feature_matrix, num_frames def prepare_reader(self, filename_queue, max_quantized_value=2, min_quantized_value=-2): reader = tf.TFRecordReader() _, serialized_example = 
reader.read(filename_queue) return self.prepare_serialized_examples(serialized_example, max_quantized_value, min_quantized_value) def prepare_serialized_examples(self, serialized_example, max_quantized_value=2, min_quantized_value=-2): contexts, features = tf.parse_single_sequence_example( serialized_example, context_features={"id": tf.FixedLenFeature( [], tf.string), "labels": tf.VarLenFeature(tf.int64)}, sequence_features={ feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string) for feature_name in self.feature_names }) labels = (tf.cast( tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1, validate_indices=False), tf.bool)) num_features = len(self.feature_names) assert num_features > 0, "No feature selected: feature_names is empty!" assert len(self.feature_names) == len(self.feature_sizes), \ "length of feature_names (={}) != length of feature_sizes (={})".format( \ len(self.feature_names), len(self.feature_sizes)) num_frames = -1 feature_matrices = [None] * num_features for feature_index in range(num_features): feature_matrix, num_frames_in_this_feature = self.get_video_matrix( features[self.feature_names[feature_index]], self.feature_sizes[feature_index], self.max_frames, max_quantized_value, min_quantized_value) if num_frames == -1: num_frames = num_frames_in_this_feature else: tf.assert_equal(num_frames, num_frames_in_this_feature) feature_matrices[feature_index] = feature_matrix num_frames = tf.minimum(num_frames, self.max_frames) video_matrix = tf.concat(feature_matrices, 1) batch_video_ids = tf.expand_dims(contexts["id"], 0) batch_video_matrix = tf.expand_dims(video_matrix, 0) batch_labels = tf.expand_dims(labels, 0) batch_frames = tf.expand_dims(num_frames, 0) return batch_video_ids, batch_video_matrix, batch_labels, batch_frames
true
true
1c45ffd929e7bc233ccb8e5fdcdba952ce40321b
6,303
py
Python
userbot/plugins/updater.py
kumar451/CatUserbot
44fab853232fad163fee63565cc4f3e645596527
[ "MIT" ]
null
null
null
userbot/plugins/updater.py
kumar451/CatUserbot
44fab853232fad163fee63565cc4f3e645596527
[ "MIT" ]
null
null
null
userbot/plugins/updater.py
kumar451/CatUserbot
44fab853232fad163fee63565cc4f3e645596527
[ "MIT" ]
null
null
null
"""Update UserBot code (for Xtra-Telegram) Syntax: .update \nAll Credits goes to © @Three_Cube_TeKnoways \nFor this awasome plugin.\nPorted from PpaperPlane Extended""" from os import remove, execle, path, makedirs, getenv, environ,execl from shutil import rmtree import asyncio import sys from git import Repo from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError from userbot import CMD_HELP, bot from userbot.utils import admin_cmd UPSTREAM_REPO_URL = "https://github.com/Jisan09/catuserbot.git" HEROKU_API_KEY = Var.HEROKU_API_KEY HEROKU_APP_NAME = Var.HEROKU_APP_NAME requirements_path = path.join( path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt') async def gen_chlog(repo, diff): ch_log = '' d_form = "%d/%m/%y" for c in repo.iter_commits(diff): ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} by <{c.author}>\n' return ch_log async def update_requirements(): reqs = str(requirements_path) try: process = await asyncio.create_subprocess_shell( ' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]), stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) await process.communicate() return process.returncode except Exception as e: return repr(e) @borg.on(admin_cmd(pattern="update ?(.*)", outgoing=True)) async def upstream(ups): "For .update command, check if the bot is up to date, update if specified" conf = ups.pattern_match.group(1) await ups.edit("Checking for updates, please wait....") off_repo = UPSTREAM_REPO_URL force_update = False try: txt = "Oops.. Updater cannot continue due to " txt += "some problems occured`\n\n**LOGTRACE:**\n" repo = Repo() except NoSuchPathError as error: await ups.edit(f'{txt}\ndirectory {error} is not found') repo.__del__() return except GitCommandError as error: await ups.edit(f'{txt}\nEarly failure! 
{error}') repo.__del__() return except InvalidGitRepositoryError as error: if conf != "now": await ups.edit(f"Unfortunately, the directory {error} does not seem to be a git repository.\ \nBut we can fix that by force updating the userbot using `.update now`.") return repo = Repo.init() origin = repo.create_remote('upstream', off_repo) origin.fetch() force_update = True repo.create_head('master', origin.refs.master) repo.heads.master.set_tracking_branch(origin.refs.master) repo.heads.master.checkout(True) ac_br = repo.active_branch.name if ac_br != 'master': await ups.edit( f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). ' 'in that case, Updater is unable to identify ' 'which branch is to be merged. ' 'please checkout to any official branch`') repo.__del__() return try: repo.create_remote('upstream', off_repo) except BaseException: pass ups_rem = repo.remote('upstream') ups_rem.fetch(ac_br) changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}') if not changelog and not force_update: await ups.edit( f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n') repo.__del__() return if conf != "now" and not force_update: changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`' if len(changelog_str) > 4096: await ups.edit("`Changelog is too big, view the file to see it.`") file = open("output.txt", "w+") file.write(changelog_str) file.close() await ups.client.send_file( ups.chat_id, "output.txt", reply_to=ups.id, ) remove("output.txt") else: await ups.edit(changelog_str) await ups.respond("do `.update now` to update") return if force_update: await ups.edit('Force-Syncing to latest stable userbot code, please wait...') else: await ups.edit('Updating userbot, please wait....') if HEROKU_API_KEY is not None: import heroku3 heroku = heroku3.from_key(HEROKU_API_KEY) heroku_app = None heroku_applications = heroku.apps() if not HEROKU_APP_NAME: await ups.edit('CAT Please set up the `HEROKU_APP_NAME` variable to be 
able to update userbot.') repo.__del__() return for app in heroku_applications: if app.name == HEROKU_APP_NAME: heroku_app = app break if heroku_app is None: await ups.edit( f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`' ) repo.__del__() return ups_rem.fetch(ac_br) repo.git.reset("--hard", "FETCH_HEAD") heroku_git_url = heroku_app.git_url.replace( "https://", "https://api:" + HEROKU_API_KEY + "@") if "heroku" in repo.remotes: remote = repo.remote("heroku") remote.set_url(heroku_git_url) else: remote = repo.create_remote("heroku", heroku_git_url) await ups.edit("Updating and Deploying New Update. Please wait for 5 minutes then use `.alive` to check if i'm working or not.") remote.push(refspec="HEAD:refs/heads/master", force=True) else: try: ups_rem.pull(ac_br) except GitCommandError: repo.git.reset("--hard", "FETCH_HEAD") reqs_upgrade = await update_requirements() await ups.edit('`Successfully Updated!\n' 'Bot is restarting... Wait for a second!`') # Spin a new instance of bot args = [sys.executable, "-m", "userbot"] execle(sys.executable, *args, environ) return CMD_HELP.update({ 'update': ".update\ \nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\ \n\n.update now\ \nUsage: Updates your userbot, if there are any updates in the main userbot repository." })
38.2
136
0.618912
from os import remove, execle, path, makedirs, getenv, environ,execl from shutil import rmtree import asyncio import sys from git import Repo from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError from userbot import CMD_HELP, bot from userbot.utils import admin_cmd UPSTREAM_REPO_URL = "https://github.com/Jisan09/catuserbot.git" HEROKU_API_KEY = Var.HEROKU_API_KEY HEROKU_APP_NAME = Var.HEROKU_APP_NAME requirements_path = path.join( path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt') async def gen_chlog(repo, diff): ch_log = '' d_form = "%d/%m/%y" for c in repo.iter_commits(diff): ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} by <{c.author}>\n' return ch_log async def update_requirements(): reqs = str(requirements_path) try: process = await asyncio.create_subprocess_shell( ' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]), stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) await process.communicate() return process.returncode except Exception as e: return repr(e) @borg.on(admin_cmd(pattern="update ?(.*)", outgoing=True)) async def upstream(ups): conf = ups.pattern_match.group(1) await ups.edit("Checking for updates, please wait....") off_repo = UPSTREAM_REPO_URL force_update = False try: txt = "Oops.. Updater cannot continue due to " txt += "some problems occured`\n\n**LOGTRACE:**\n" repo = Repo() except NoSuchPathError as error: await ups.edit(f'{txt}\ndirectory {error} is not found') repo.__del__() return except GitCommandError as error: await ups.edit(f'{txt}\nEarly failure! 
{error}') repo.__del__() return except InvalidGitRepositoryError as error: if conf != "now": await ups.edit(f"Unfortunately, the directory {error} does not seem to be a git repository.\ \nBut we can fix that by force updating the userbot using `.update now`.") return repo = Repo.init() origin = repo.create_remote('upstream', off_repo) origin.fetch() force_update = True repo.create_head('master', origin.refs.master) repo.heads.master.set_tracking_branch(origin.refs.master) repo.heads.master.checkout(True) ac_br = repo.active_branch.name if ac_br != 'master': await ups.edit( f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). ' 'in that case, Updater is unable to identify ' 'which branch is to be merged. ' 'please checkout to any official branch`') repo.__del__() return try: repo.create_remote('upstream', off_repo) except BaseException: pass ups_rem = repo.remote('upstream') ups_rem.fetch(ac_br) changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}') if not changelog and not force_update: await ups.edit( f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n') repo.__del__() return if conf != "now" and not force_update: changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`' if len(changelog_str) > 4096: await ups.edit("`Changelog is too big, view the file to see it.`") file = open("output.txt", "w+") file.write(changelog_str) file.close() await ups.client.send_file( ups.chat_id, "output.txt", reply_to=ups.id, ) remove("output.txt") else: await ups.edit(changelog_str) await ups.respond("do `.update now` to update") return if force_update: await ups.edit('Force-Syncing to latest stable userbot code, please wait...') else: await ups.edit('Updating userbot, please wait....') if HEROKU_API_KEY is not None: import heroku3 heroku = heroku3.from_key(HEROKU_API_KEY) heroku_app = None heroku_applications = heroku.apps() if not HEROKU_APP_NAME: await ups.edit('CAT Please set up the `HEROKU_APP_NAME` variable to be 
able to update userbot.') repo.__del__() return for app in heroku_applications: if app.name == HEROKU_APP_NAME: heroku_app = app break if heroku_app is None: await ups.edit( f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`' ) repo.__del__() return ups_rem.fetch(ac_br) repo.git.reset("--hard", "FETCH_HEAD") heroku_git_url = heroku_app.git_url.replace( "https://", "https://api:" + HEROKU_API_KEY + "@") if "heroku" in repo.remotes: remote = repo.remote("heroku") remote.set_url(heroku_git_url) else: remote = repo.create_remote("heroku", heroku_git_url) await ups.edit("Updating and Deploying New Update. Please wait for 5 minutes then use `.alive` to check if i'm working or not.") remote.push(refspec="HEAD:refs/heads/master", force=True) else: try: ups_rem.pull(ac_br) except GitCommandError: repo.git.reset("--hard", "FETCH_HEAD") reqs_upgrade = await update_requirements() await ups.edit('`Successfully Updated!\n' 'Bot is restarting... Wait for a second!`') # Spin a new instance of bot args = [sys.executable, "-m", "userbot"] execle(sys.executable, *args, environ) return CMD_HELP.update({ 'update': ".update\ \nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\ \n\n.update now\ \nUsage: Updates your userbot, if there are any updates in the main userbot repository." })
true
true
1c460171229eb1de0dd2daccc2cae4a857668e41
6,051
py
Python
util.py
bbitarello/ldpred
b84b99f23dc83dc164300b8dee6678207461a751
[ "MIT" ]
89
2016-06-03T14:31:21.000Z
2022-02-22T02:15:45.000Z
util.py
bbitarello/ldpred
b84b99f23dc83dc164300b8dee6678207461a751
[ "MIT" ]
143
2016-08-10T14:06:53.000Z
2021-07-04T10:29:26.000Z
util.py
bbitarello/ldpred
b84b99f23dc83dc164300b8dee6678207461a751
[ "MIT" ]
68
2016-08-05T14:56:39.000Z
2021-12-10T15:43:35.000Z
""" Various general utility functions. """ import scipy as sp from scipy import stats import pickle import gzip import os from itertools import takewhile from itertools import repeat import sys import re # LDpred currently ignores the Y and MT chromosomes. ok_chromosomes = set(range(1, 24)) chromosomes_list = ['chrom_%d' % (chrom) for chrom in ok_chromosomes] chrom_name_map = {'X':23,'chr_X':23,'chrom_X':23} for chrom in ok_chromosomes: chrom_name_map['%d' % (chrom)]=chrom chrom_name_map['chrom_%d' % (chrom)]=chrom chrom_name_map['chr_%d' % (chrom)]=chrom def get_chrom_num(chrom): return chrom_name_map.get(re.sub("chr", "", chrom),0) #Various auxiliary variables ambig_nts = set([('A', 'T'), ('T', 'A'), ('G', 'C'), ('C', 'G')]) opp_strand_dict = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'} valid_nts = set(['A', 'T', 'C', 'G']) # LDpred currently ignores the Y and MT chromosomes. valid_chromosomes = ['%d' % (x) for x in range(1, 24)] valid_chromosomes.append('X') chromosomes_list = ['chrom_%s' % (chrom) for chrom in valid_chromosomes] #Conversion sizes for strings (necessary for using h5py and python 3) fids_dtype = '|S64' iids_dtype = '|S64' sids_dtype = "|S30" nts_dtype = "|S1" sids_u_dtype = '<U30' nts_u_dtype = '<U1' my_path = os.path.dirname(os.path.abspath(__file__)) hm3_file = os.path.join(my_path, 'reference','hm3_sids.txt.gz') lrld_file = os.path.join(my_path, 'reference','long-range-ld-price-2008hg38.txt') def check_chromosomes(missing_chromosomes): if len(missing_chromosomes) > 0: print('Ignored chromosomes:', ','.join(list(missing_chromosomes))) print('Please note that only data on chromosomes 1-23, and X is parsed.') def calc_auc(y_true, y_hat, show_plot=False): """ Calculate the Area Under the Curve (AUC) for a predicted and observed case-control phenotype. 
""" y_true = sp.copy(y_true) if len(sp.unique(y_true)) == 2: y_min = y_true.min() y_max = y_true.max() if y_min != 0 or y_max != 1: print('Transforming back to a dichotomous trait') y_true[y_true == y_min] = 0 y_true[y_true == y_max] = 1 else: print('Warning: Calculating AUC for a quantitative phenotype.') y_mean = sp.mean(y_true) zero_filter = y_true <= y_mean one_filter = y_true > y_mean y_true[zero_filter] = 0 y_true[one_filter] = 1 num_cases = sp.sum(y_true == 1) num_controls = sp.sum(y_true == 0) assert num_cases + num_controls == len(y_true), 'The phenotype is not defined as expected. It is not binary (0 1 case-control status).' print('%d cases, %d controls' % (num_cases, num_controls)) num_indivs = float(len(y_true)) tot_num_pos = float(sp.sum(y_true)) tot_num_neg = float(num_indivs - tot_num_pos) l = y_hat.tolist() l.sort(reverse=True) roc_x = [] roc_y = [] auc = 0.0 prev_fpr = 0.0 for thres in l: thres_filter = y_hat >= thres y_t = y_true[thres_filter] n = len(y_t) tp = sp.sum(y_t) fp = n - tp fpr = fp / tot_num_neg tpr = tp / tot_num_pos roc_x.append(fpr) roc_y.append(tpr) delta_fpr = fpr - prev_fpr auc += tpr * delta_fpr prev_fpr = fpr print('AUC: %0.4f' % auc) if show_plot: import pylab pylab.plot(roc_x, roc_y) pylab.show() return auc def obs_h2_to_liab(R2_osb,K=0.01,P=0.5): """ Transformation from observed to liability scale. Lee et al. AJHG 2011 conversion? For heritability only """ t = stats.norm.ppf(1-K) z = stats.norm.pdf(t) c = P*(1-P)*z**2/(K**2*(1-K)**2) R2_liab = R2_osb/c return R2_liab def obs_r2_to_liab(R2_osb,K=0.01,P=0.5): """ Lee et al., Gen Epi 2012 conversion For R2 only """ t = stats.norm.ppf(K) z = stats.norm.pdf(t) m = z/K C = (K*(1-K))**2/((z**2)*(P*(1-P))) d = m*((P-K)/(1-K)) theta =d**2 - d*t R2_liab_cc = (R2_osb*C)/(1+(R2_osb*C*theta)) return R2_liab_cc def load_hapmap_SNPs(): f = gzip.open(hm3_file, 'r') hm3_sids = pickle.load(f) f.close() return hm3_sids def load_lrld_dict(): #Load Price et al. AJHG 2008 long range LD table. 
d = {} for chrom in ok_chromosomes: d[chrom] = {'reg_dict':{}} with open(lrld_file, 'r') as f: for line in f: l = line.split() d[chrom_name_map[l[0]]][l[3]] = {'start_pos':int(l[1]), 'end_pos':int(l[2])} return d def is_in_lrld(chrom, pos, lrld_dict): if len(lrld_dict[chrom]['reg_dict'])==0: return False else: for lrld_reg in lrld_dict[chrom]['reg_dict']: if lrld_reg['start_pos'] < pos < lrld_reg['end_pos']: return True else: return False def get_snp_lrld_status(chromosome, positions, lrld_dict): snp_lrld = sp.zeros(len(positions)) for snp_i in range(len(positions)): snp_lrld[snp_i] = is_in_lrld(chromosome, positions[snp_i], lrld_dict) return snp_lrld def is_gz(name): return name.lower().endswith(('.gz', '.gzip')) def count_lines(filename): if sys.version_info >= (3,0): return count_lines_fast(filename) else: return count_lines_slow(filename) def count_lines_fast(filename): opener = open if is_gz(filename): opener = gzip.open try: with opener(filename, 'rb') as f: bufgen = takewhile(lambda x: x, (f.raw.read(1024*1024) for _ in repeat(None))) num_lines =sum( buf.count(b'\n') for buf in bufgen ) except Exception: num_lines = -1 return num_lines def count_lines_slow(filename): opener = open if is_gz(filename): opener = gzip.open try: with opener(filename, 'rb') as f: num_lines = sum(1 for line in f) except Exception: num_lines=-1 return num_lines
27.013393
139
0.605354
import scipy as sp from scipy import stats import pickle import gzip import os from itertools import takewhile from itertools import repeat import sys import re ok_chromosomes = set(range(1, 24)) chromosomes_list = ['chrom_%d' % (chrom) for chrom in ok_chromosomes] chrom_name_map = {'X':23,'chr_X':23,'chrom_X':23} for chrom in ok_chromosomes: chrom_name_map['%d' % (chrom)]=chrom chrom_name_map['chrom_%d' % (chrom)]=chrom chrom_name_map['chr_%d' % (chrom)]=chrom def get_chrom_num(chrom): return chrom_name_map.get(re.sub("chr", "", chrom),0) ambig_nts = set([('A', 'T'), ('T', 'A'), ('G', 'C'), ('C', 'G')]) opp_strand_dict = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'} valid_nts = set(['A', 'T', 'C', 'G']) valid_chromosomes = ['%d' % (x) for x in range(1, 24)] valid_chromosomes.append('X') chromosomes_list = ['chrom_%s' % (chrom) for chrom in valid_chromosomes] fids_dtype = '|S64' iids_dtype = '|S64' sids_dtype = "|S30" nts_dtype = "|S1" sids_u_dtype = '<U30' nts_u_dtype = '<U1' my_path = os.path.dirname(os.path.abspath(__file__)) hm3_file = os.path.join(my_path, 'reference','hm3_sids.txt.gz') lrld_file = os.path.join(my_path, 'reference','long-range-ld-price-2008hg38.txt') def check_chromosomes(missing_chromosomes): if len(missing_chromosomes) > 0: print('Ignored chromosomes:', ','.join(list(missing_chromosomes))) print('Please note that only data on chromosomes 1-23, and X is parsed.') def calc_auc(y_true, y_hat, show_plot=False): y_true = sp.copy(y_true) if len(sp.unique(y_true)) == 2: y_min = y_true.min() y_max = y_true.max() if y_min != 0 or y_max != 1: print('Transforming back to a dichotomous trait') y_true[y_true == y_min] = 0 y_true[y_true == y_max] = 1 else: print('Warning: Calculating AUC for a quantitative phenotype.') y_mean = sp.mean(y_true) zero_filter = y_true <= y_mean one_filter = y_true > y_mean y_true[zero_filter] = 0 y_true[one_filter] = 1 num_cases = sp.sum(y_true == 1) num_controls = sp.sum(y_true == 0) assert num_cases + num_controls == 
len(y_true), 'The phenotype is not defined as expected. It is not binary (0 1 case-control status).' print('%d cases, %d controls' % (num_cases, num_controls)) num_indivs = float(len(y_true)) tot_num_pos = float(sp.sum(y_true)) tot_num_neg = float(num_indivs - tot_num_pos) l = y_hat.tolist() l.sort(reverse=True) roc_x = [] roc_y = [] auc = 0.0 prev_fpr = 0.0 for thres in l: thres_filter = y_hat >= thres y_t = y_true[thres_filter] n = len(y_t) tp = sp.sum(y_t) fp = n - tp fpr = fp / tot_num_neg tpr = tp / tot_num_pos roc_x.append(fpr) roc_y.append(tpr) delta_fpr = fpr - prev_fpr auc += tpr * delta_fpr prev_fpr = fpr print('AUC: %0.4f' % auc) if show_plot: import pylab pylab.plot(roc_x, roc_y) pylab.show() return auc def obs_h2_to_liab(R2_osb,K=0.01,P=0.5): t = stats.norm.ppf(1-K) z = stats.norm.pdf(t) c = P*(1-P)*z**2/(K**2*(1-K)**2) R2_liab = R2_osb/c return R2_liab def obs_r2_to_liab(R2_osb,K=0.01,P=0.5): t = stats.norm.ppf(K) z = stats.norm.pdf(t) m = z/K C = (K*(1-K))**2/((z**2)*(P*(1-P))) d = m*((P-K)/(1-K)) theta =d**2 - d*t R2_liab_cc = (R2_osb*C)/(1+(R2_osb*C*theta)) return R2_liab_cc def load_hapmap_SNPs(): f = gzip.open(hm3_file, 'r') hm3_sids = pickle.load(f) f.close() return hm3_sids def load_lrld_dict(): d = {} for chrom in ok_chromosomes: d[chrom] = {'reg_dict':{}} with open(lrld_file, 'r') as f: for line in f: l = line.split() d[chrom_name_map[l[0]]][l[3]] = {'start_pos':int(l[1]), 'end_pos':int(l[2])} return d def is_in_lrld(chrom, pos, lrld_dict): if len(lrld_dict[chrom]['reg_dict'])==0: return False else: for lrld_reg in lrld_dict[chrom]['reg_dict']: if lrld_reg['start_pos'] < pos < lrld_reg['end_pos']: return True else: return False def get_snp_lrld_status(chromosome, positions, lrld_dict): snp_lrld = sp.zeros(len(positions)) for snp_i in range(len(positions)): snp_lrld[snp_i] = is_in_lrld(chromosome, positions[snp_i], lrld_dict) return snp_lrld def is_gz(name): return name.lower().endswith(('.gz', '.gzip')) def count_lines(filename): if 
sys.version_info >= (3,0): return count_lines_fast(filename) else: return count_lines_slow(filename) def count_lines_fast(filename): opener = open if is_gz(filename): opener = gzip.open try: with opener(filename, 'rb') as f: bufgen = takewhile(lambda x: x, (f.raw.read(1024*1024) for _ in repeat(None))) num_lines =sum( buf.count(b'\n') for buf in bufgen ) except Exception: num_lines = -1 return num_lines def count_lines_slow(filename): opener = open if is_gz(filename): opener = gzip.open try: with opener(filename, 'rb') as f: num_lines = sum(1 for line in f) except Exception: num_lines=-1 return num_lines
true
true
1c4601d6ca6b386fcd89245ffea8fedcc89875c1
2,924
py
Python
test/functional/mempool_resurrect.py
patrykwnosuch/machinecoin-core
b6783c857f43f7f077de594d1e03d156f5295b9c
[ "MIT" ]
1
2019-05-27T11:12:53.000Z
2019-05-27T11:12:53.000Z
test/functional/mempool_resurrect.py
patrykwnosuch/machinecoin-core
b6783c857f43f7f077de594d1e03d156f5295b9c
[ "MIT" ]
null
null
null
test/functional/mempool_resurrect.py
patrykwnosuch/machinecoin-core
b6783c857f43f7f077de594d1e03d156f5295b9c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) 2014-2018 The Machinecoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test resurrection of mined transactions when the blockchain is re-organized.""" from test_framework.blocktools import create_raw_transaction from test_framework.test_framework import MachinecoinTestFramework from test_framework.util import assert_equal class MempoolCoinbaseTest(MachinecoinTestFramework): def set_test_params(self): self.num_nodes = 1 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): node0_address = self.nodes[0].getnewaddress() # Spend block 1/2/3's coinbase transactions # Mine a block. # Create three more transactions, spending the spends # Mine another block. # ... make sure all the transactions are confirmed # Invalidate both blocks # ... make sure all the transactions are put back in the mempool # Mine a new block # ... make sure all the transactions are confirmed again. 
b = [self.nodes[0].getblockhash(n) for n in range(1, 4)] coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b] spends1_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids] spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw] blocks = [] blocks.extend(self.nodes[0].generate(1)) spends2_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.98) for txid in spends1_id] spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw] blocks.extend(self.nodes[0].generate(1)) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) # Use invalidateblock to re-org back for node in self.nodes: node.invalidateblock(blocks[0]) # All txns should be back in mempool with 0 confirmations assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id)) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] == 0) # Generate another block, they should all get mined self.nodes[0].generate(1) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) if __name__ == '__main__': MempoolCoinbaseTest().main()
41.183099
123
0.675103
from test_framework.blocktools import create_raw_transaction from test_framework.test_framework import MachinecoinTestFramework from test_framework.util import assert_equal class MempoolCoinbaseTest(MachinecoinTestFramework): def set_test_params(self): self.num_nodes = 1 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): node0_address = self.nodes[0].getnewaddress() # Mine a block. # Create three more transactions, spending the spends # Mine another block. # ... make sure all the transactions are confirmed # Invalidate both blocks # ... make sure all the transactions are put back in the mempool # Mine a new block # ... make sure all the transactions are confirmed again. b = [self.nodes[0].getblockhash(n) for n in range(1, 4)] coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b] spends1_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids] spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw] blocks = [] blocks.extend(self.nodes[0].generate(1)) spends2_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.98) for txid in spends1_id] spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw] blocks.extend(self.nodes[0].generate(1)) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) # Use invalidateblock to re-org back for node in self.nodes: node.invalidateblock(blocks[0]) # All txns should be back in mempool with 0 confirmations assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id)) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] == 0) # Generate another block, they should all get mined self.nodes[0].generate(1) # mempool should be empty, all txns confirmed 
assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) if __name__ == '__main__': MempoolCoinbaseTest().main()
true
true
1c4602bc2b8f35a6487314afa0547956c601bf34
3,032
py
Python
estatisticas_facebook/users/migrations/0001_initial.py
danieldourado/estatisticas_facebook_django
67274e647cf9e2261f1a7810cd9862a4040dfc06
[ "MIT" ]
2
2017-12-22T01:00:22.000Z
2017-12-22T11:14:40.000Z
estatisticas_facebook/users/migrations/0001_initial.py
danieldourado/estatisticas_facebook_django
67274e647cf9e2261f1a7810cd9862a4040dfc06
[ "MIT" ]
18
2017-12-14T12:04:45.000Z
2022-03-11T23:23:05.000Z
estatisticas_facebook/users/migrations/0001_initial.py
danieldourado/estatisticas_facebook_django
67274e647cf9e2261f1a7810cd9862a4040dfc06
[ "MIT" ]
1
2021-03-27T16:18:56.000Z
2021-03-27T16:18:56.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2017-12-06 11:33 from __future__ import unicode_literals import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. 
Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('name', models.CharField(blank=True, max_length=255, verbose_name='Name of User')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'abstract': False, 'verbose_name': 'user', 'verbose_name_plural': 'users', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
63.166667
329
0.662929
from __future__ import unicode_literals import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('name', models.CharField(blank=True, max_length=255, verbose_name='Name of User')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'abstract': False, 'verbose_name': 'user', 'verbose_name_plural': 'users', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
true
true
1c460421d2d07bef9ca1d09f5ba7710449101a3e
689
py
Python
battleships/managers/GameOver.py
wkta/Python-Bataille-Navale
b6f725519cf1cf559e60ec766aa4f059463b1493
[ "MIT" ]
null
null
null
battleships/managers/GameOver.py
wkta/Python-Bataille-Navale
b6f725519cf1cf559e60ec766aa4f059463b1493
[ "MIT" ]
null
null
null
battleships/managers/GameOver.py
wkta/Python-Bataille-Navale
b6f725519cf1cf559e60ec766aa4f059463b1493
[ "MIT" ]
1
2019-12-03T15:42:38.000Z
2019-12-03T15:42:38.000Z
# Copyright © 2019 CAILLAUD Jean-Baptiste. # import the engine. import engine class GameOver(engine.LevelManager): """ Renders the game over screen. """ def begin(self): # Add the close handler. engine.Engine.input_handler.add_listener(engine.CloseOnEscapeOrQuit()) # Create the game over message. text = engine.TextGameObject( engine.Engine.scene, "Futura", 48, "You won !" if engine.Engine.game_manager.winner == 0 else "You lost ...", (255, 255, 255) ) text.transform.position = engine.math.Vector2(256, 256) text.transform.offset = text.size / -2
26.5
86
0.596517
import engine class GameOver(engine.LevelManager): def begin(self): engine.Engine.input_handler.add_listener(engine.CloseOnEscapeOrQuit()) text = engine.TextGameObject( engine.Engine.scene, "Futura", 48, "You won !" if engine.Engine.game_manager.winner == 0 else "You lost ...", (255, 255, 255) ) text.transform.position = engine.math.Vector2(256, 256) text.transform.offset = text.size / -2
true
true
1c460473d11c959ddd81ee921f32596dbd8e1e9e
754
py
Python
snakewatch/action/__init__.py
asoc/snakewatch
347b94e0ca59cdb309a6950fa3b5464c8d0081f8
[ "BSD-3-Clause" ]
null
null
null
snakewatch/action/__init__.py
asoc/snakewatch
347b94e0ca59cdb309a6950fa3b5464c8d0081f8
[ "BSD-3-Clause" ]
null
null
null
snakewatch/action/__init__.py
asoc/snakewatch
347b94e0ca59cdb309a6950fa3b5464c8d0081f8
[ "BSD-3-Clause" ]
null
null
null
""" This file is part of snakewatch. snakewatch is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. snakewatch is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with snakewatch. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import print_function, absolute_import, unicode_literals, division
37.7
82
0.795756
from __future__ import print_function, absolute_import, unicode_literals, division
true
true
1c4604bd0cd7e6cda5fbccf019cd6209998f11b6
2,134
py
Python
tests/unit/pypyr/parser/jsonfile_test.py
pypyr/pypyr-cli
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
[ "Apache-2.0" ]
31
2017-03-24T11:27:34.000Z
2020-05-27T20:06:28.000Z
tests/unit/pypyr/parser/jsonfile_test.py
pypyr/pypyr-cli
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
[ "Apache-2.0" ]
89
2017-04-12T09:50:32.000Z
2020-08-13T13:18:36.000Z
tests/unit/pypyr/parser/jsonfile_test.py
pypyr/pypyr-cli
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
[ "Apache-2.0" ]
6
2017-06-04T14:19:59.000Z
2020-02-10T13:16:40.000Z
"""jsonfile.py unit tests.""" from unittest.mock import patch import pytest import pypyr.parser.jsonfile def test_json_file_open_fails_on_arbitrary_string(): """Non path-y input string should fail.""" with pytest.raises(FileNotFoundError): pypyr.parser.jsonfile.get_parsed_context('value 1,value 2, value3') def test_json_file_open_fails_on_empty_string(): """Non path-y input string should fail.""" with pytest.raises(AssertionError): pypyr.parser.jsonfile.get_parsed_context(None) def test_json_pass(fs): """Relative path to json should succeed.""" in_path = './tests/testfiles/test.json' fs.create_file(in_path, contents="""{ "key1": "value1", "key2": "value2", "key3": "value3" } """) context = pypyr.parser.jsonfile.get_parsed_context([in_path]) assert context, "context shouldn't be None" assert len(context) == 3, "context should have 3 items" assert context["key2"] == "value2", "key2 should be value2" @patch('pypyr.config.config.default_encoding', new='utf-16') def test_json_pass_with_encoding(fs): """Relative path to json should succeed with encoding.""" in_path = './tests/testfiles/test.json' fs.create_file(in_path, contents="""{ "key1": "value1", "key2": "value2", "key3": "value3" } """, encoding='utf-16') context = pypyr.parser.jsonfile.get_parsed_context([in_path]) assert context, "context shouldn't be None" assert len(context) == 3, "context should have 3 items" assert context["key2"] == "value2", "key2 should be value2" def test_json_parse_not_mapping_at_root(fs): """Not mapping at root level raises.""" in_path = './tests/testfiles/singleliteral.json' fs.create_file(in_path, contents='123') with pytest.raises(TypeError) as err_info: pypyr.parser.jsonfile.get_parsed_context([in_path]) assert str(err_info.value) == ( "json input should describe an object at the top " "level. You should have something like\n" "{\n\"key1\":\"value1\",\n\"key2\":\"value2\"\n}\n" "at the json top-level, not an [array] or literal.")
31.850746
75
0.679944
from unittest.mock import patch import pytest import pypyr.parser.jsonfile def test_json_file_open_fails_on_arbitrary_string(): with pytest.raises(FileNotFoundError): pypyr.parser.jsonfile.get_parsed_context('value 1,value 2, value3') def test_json_file_open_fails_on_empty_string(): with pytest.raises(AssertionError): pypyr.parser.jsonfile.get_parsed_context(None) def test_json_pass(fs): in_path = './tests/testfiles/test.json' fs.create_file(in_path, contents="""{ "key1": "value1", "key2": "value2", "key3": "value3" } """) context = pypyr.parser.jsonfile.get_parsed_context([in_path]) assert context, "context shouldn't be None" assert len(context) == 3, "context should have 3 items" assert context["key2"] == "value2", "key2 should be value2" @patch('pypyr.config.config.default_encoding', new='utf-16') def test_json_pass_with_encoding(fs): in_path = './tests/testfiles/test.json' fs.create_file(in_path, contents="""{ "key1": "value1", "key2": "value2", "key3": "value3" } """, encoding='utf-16') context = pypyr.parser.jsonfile.get_parsed_context([in_path]) assert context, "context shouldn't be None" assert len(context) == 3, "context should have 3 items" assert context["key2"] == "value2", "key2 should be value2" def test_json_parse_not_mapping_at_root(fs): in_path = './tests/testfiles/singleliteral.json' fs.create_file(in_path, contents='123') with pytest.raises(TypeError) as err_info: pypyr.parser.jsonfile.get_parsed_context([in_path]) assert str(err_info.value) == ( "json input should describe an object at the top " "level. You should have something like\n" "{\n\"key1\":\"value1\",\n\"key2\":\"value2\"\n}\n" "at the json top-level, not an [array] or literal.")
true
true
1c46065a2d7cec80d32a5396991fd1b74b074e66
8,727
py
Python
syncflux.py
nagylzs/syncflux
c070267065cad817708d0680e17bfe5f8942310f
[ "Apache-2.0" ]
null
null
null
syncflux.py
nagylzs/syncflux
c070267065cad817708d0680e17bfe5f8942310f
[ "Apache-2.0" ]
null
null
null
syncflux.py
nagylzs/syncflux
c070267065cad817708d0680e17bfe5f8942310f
[ "Apache-2.0" ]
null
null
null
import copy import datetime import sys import os import time import argparse import traceback import pytz import syncthing from influxdb import InfluxDBClient import yaml from yaml2dataclass import Schema, SchemaPath from typing import Optional, Dict, Type, List from dataclasses import dataclass, asdict, field @dataclass class SyncthingConfiguration(Schema): name: str api_key: str host: str = 'localhost' port: int = field(default=8384) timeout: float = field(default=10.0) is_https: bool = field(default=False) ssl_cert_file: Optional[str] = field(default=None) tags: Optional[List[str]] = field(default_factory=lambda: []) def get_client_params(self): result = asdict(self) if "name" in result: del result["name"] if "tags" in result: del result["tags"] return result @dataclass class InfluxDbConfiguration(Schema): host: str port: int # Common ports: 443 ssl: bool verify_ssl: bool database: str username: str password: str def get_client_params(self): result = asdict(self) if "tags" in result: del result["tags"] return result @dataclass class MeasurementConfiguration(Schema): devices: str folders: str @dataclass class AppConfiguration(Schema): syncthings: Dict[str, SyncthingConfiguration] influxes: Dict[str, InfluxDbConfiguration] measurements: MeasurementConfiguration @classmethod def _load_dict(cls, props_dict, dest_cls: Type[Schema], add_name: bool = False): result = {} for name, value in props_dict.items(): arguments = {} arguments.update(value) if add_name: arguments["name"] = name result[name] = dest_cls.scm_load_from_dict(arguments) return result @classmethod def scm_convert(cls, values: dict, path: SchemaPath): values["syncthings"] = cls._load_dict(values["syncthings"], SyncthingConfiguration, True) values["influxes"] = cls._load_dict(values["influxes"], InfluxDbConfiguration) return values def load_app_config(stream) -> AppConfiguration: """Load application configuration from a stream.""" obj = yaml.safe_load(stream) return 
AppConfiguration.scm_load_from_dict(obj) def error(message: str): sys.stderr.write("\nerror: " + message + "\n") sys.stderr.flush() raise SystemExit(-1) def info(*values): if not args.silent: print(*values) def main(): # Collect data points = [] for sync in config.syncthings.values(): info(" Connect syncthing %s" % sync.name) proto_tags = {"cfg_name": sync.name} if sync.tags: proto_tags.update(sync.tags) conn_args = sync.get_client_params() q_started = time.time() conn = syncthing.Syncthing(**conn_args) now = datetime.datetime.now(tz=pytz.UTC) sync_cfg = conn.system.config() # My own device id my_device = sync_cfg["defaults"]["folder"]["devices"][0] my_id = my_device["deviceID"] proto_tags["my_id"] = my_id # Collect device stats device_stats = conn.stats.device() # List all remote devices remote_devices = [] for device in sync_cfg["devices"]: device_id = device["deviceID"] if device_id == my_id: proto_tags["my_name"] = device["name"] else: stats = device_stats[device_id] last_seen = syncthing.parse_datetime(stats["lastSeen"]) last_seen_since = now - last_seen remote_devices.append({ "tags": { "id": device["deviceID"], # Device ID "name": device["name"], # Device Name }, "fields": { "last_seen_since_sec": last_seen_since.total_seconds(), # Number of seconds last seen } }) # Folders folders = [] for folder in sync_cfg["folders"]: # Get completion for my own device completion = conn.database.completion(my_id, folder["id"]) folders.append({ "tags": {"id": folder["id"], "label": folder["label"], "path": folder["path"]}, "fields": {"completion": completion}, }) q_elapsed = time.time() - q_started proto_fields = {"q_elapsed": q_elapsed} # Create data points for devices for device in remote_devices: tags = copy.copy(proto_tags) tags.update(device["tags"]) fields = copy.copy(proto_fields) fields.update(device["fields"]) point = dict(measurement=config.measurements.devices, tags=tags, fields=fields) points.append(point) # Create points for folders for folder in folders: tags 
= copy.copy(proto_tags) tags.update(folder["tags"]) fields = copy.copy(proto_fields) fields.update(folder["fields"]) point = dict(measurement=config.measurements.folders, tags=tags, fields=fields) points.append(point) if not points: return for influx_name, influx in config.influxes.items(): info(" Sending %d point(s) to influxdb %s" % (len(points), influx_name)) try: influx = config.influxes[influx_name] client = InfluxDBClient(**asdict(influx)) client.write_points(points) except: if args.halt_on_send_error: raise else: traceback.print_exc(file=sys.stderr) parser = argparse.ArgumentParser(description='Monitor your Syncthing instances with influxdb.') parser.add_argument('-c', "--config", dest="config", default=None, help="Configuration file for application. Default is syncflux.yml. " "See syncflux_example.yml for an example.") parser.add_argument("--config-dir", dest="config_dir", default=None, help="Configuration directory. All config files with .yml extension will be processed one by one.") parser.add_argument('-n', "--count", dest="count", default=1, type=int, help="Number of test runs. Default is one. Use -1 to run indefinitely.") parser.add_argument('-w', "--wait", dest="wait", default=60, type=float, help="Number of seconds between test runs.") parser.add_argument("-s", "--silent", dest='silent', action="store_true", default=False, help="Supress all messages except errors.") parser.add_argument("-v", "--verbose", dest='verbose', action="store_true", default=False, help="Be verbose." ) parser.add_argument("--halt-on-send-error", dest="halt_on_send_error", default=False, action="store_true", help="Halt when cannot send data to influxdb. 
The default is to ignore the error.") args = parser.parse_args() if args.silent and args.verbose: parser.error("Cannot use --silent and --verbose at the same time.") if args.config is None: args.config = "syncflux.yml" if (args.config is not None) and (args.config_dir is not None): parser.error("You must give either --config or --config-dir (exactly one of them)") if args.count == 0: parser.error("Test run count cannot be zero.") if args.wait <= 0: parser.error("Wait time must be positive.") if args.config: config_files = [args.config] else: config_files = [] for file_name in sorted(os.listdir(args.config_dir)): ext = os.path.splitext(file_name)[1] if ext.lower() == ".yml": fpath = os.path.join(args.config_dir, file_name) config_files.append(fpath) index = 0 while args.count < 0 or index < args.count: if args.count != 1: info("Pass #%d started" % (index + 1)) started = time.time() for config_file in config_files: if not os.path.isfile(config_file): parser.error("Cannot open %s" % config_file) config = load_app_config(open(config_file, "r")) main() elapsed = time.time() - started index += 1 last_one = (args.count > 0) and (index == args.count) if not last_one: remaining = args.wait - elapsed if remaining > 0: if not args.silent: info("Pass #%d elapsed %.2f sec, waiting %.2f sec for next." % (index, elapsed, remaining)) time.sleep(args.wait) else: info("Pass #%d elapsed %.2f sec" % (index, elapsed)) info("")
33.694981
119
0.605936
import copy import datetime import sys import os import time import argparse import traceback import pytz import syncthing from influxdb import InfluxDBClient import yaml from yaml2dataclass import Schema, SchemaPath from typing import Optional, Dict, Type, List from dataclasses import dataclass, asdict, field @dataclass class SyncthingConfiguration(Schema): name: str api_key: str host: str = 'localhost' port: int = field(default=8384) timeout: float = field(default=10.0) is_https: bool = field(default=False) ssl_cert_file: Optional[str] = field(default=None) tags: Optional[List[str]] = field(default_factory=lambda: []) def get_client_params(self): result = asdict(self) if "name" in result: del result["name"] if "tags" in result: del result["tags"] return result @dataclass class InfluxDbConfiguration(Schema): host: str port: int ssl: bool verify_ssl: bool database: str username: str password: str def get_client_params(self): result = asdict(self) if "tags" in result: del result["tags"] return result @dataclass class MeasurementConfiguration(Schema): devices: str folders: str @dataclass class AppConfiguration(Schema): syncthings: Dict[str, SyncthingConfiguration] influxes: Dict[str, InfluxDbConfiguration] measurements: MeasurementConfiguration @classmethod def _load_dict(cls, props_dict, dest_cls: Type[Schema], add_name: bool = False): result = {} for name, value in props_dict.items(): arguments = {} arguments.update(value) if add_name: arguments["name"] = name result[name] = dest_cls.scm_load_from_dict(arguments) return result @classmethod def scm_convert(cls, values: dict, path: SchemaPath): values["syncthings"] = cls._load_dict(values["syncthings"], SyncthingConfiguration, True) values["influxes"] = cls._load_dict(values["influxes"], InfluxDbConfiguration) return values def load_app_config(stream) -> AppConfiguration: obj = yaml.safe_load(stream) return AppConfiguration.scm_load_from_dict(obj) def error(message: str): sys.stderr.write("\nerror: " + message + 
"\n") sys.stderr.flush() raise SystemExit(-1) def info(*values): if not args.silent: print(*values) def main(): points = [] for sync in config.syncthings.values(): info(" Connect syncthing %s" % sync.name) proto_tags = {"cfg_name": sync.name} if sync.tags: proto_tags.update(sync.tags) conn_args = sync.get_client_params() q_started = time.time() conn = syncthing.Syncthing(**conn_args) now = datetime.datetime.now(tz=pytz.UTC) sync_cfg = conn.system.config() my_device = sync_cfg["defaults"]["folder"]["devices"][0] my_id = my_device["deviceID"] proto_tags["my_id"] = my_id device_stats = conn.stats.device() remote_devices = [] for device in sync_cfg["devices"]: device_id = device["deviceID"] if device_id == my_id: proto_tags["my_name"] = device["name"] else: stats = device_stats[device_id] last_seen = syncthing.parse_datetime(stats["lastSeen"]) last_seen_since = now - last_seen remote_devices.append({ "tags": { "id": device["deviceID"], "name": device["name"], }, "fields": { "last_seen_since_sec": last_seen_since.total_seconds(), } }) folders = [] for folder in sync_cfg["folders"]: completion = conn.database.completion(my_id, folder["id"]) folders.append({ "tags": {"id": folder["id"], "label": folder["label"], "path": folder["path"]}, "fields": {"completion": completion}, }) q_elapsed = time.time() - q_started proto_fields = {"q_elapsed": q_elapsed} for device in remote_devices: tags = copy.copy(proto_tags) tags.update(device["tags"]) fields = copy.copy(proto_fields) fields.update(device["fields"]) point = dict(measurement=config.measurements.devices, tags=tags, fields=fields) points.append(point) for folder in folders: tags = copy.copy(proto_tags) tags.update(folder["tags"]) fields = copy.copy(proto_fields) fields.update(folder["fields"]) point = dict(measurement=config.measurements.folders, tags=tags, fields=fields) points.append(point) if not points: return for influx_name, influx in config.influxes.items(): info(" Sending %d point(s) to influxdb %s" % (len(points), 
influx_name)) try: influx = config.influxes[influx_name] client = InfluxDBClient(**asdict(influx)) client.write_points(points) except: if args.halt_on_send_error: raise else: traceback.print_exc(file=sys.stderr) parser = argparse.ArgumentParser(description='Monitor your Syncthing instances with influxdb.') parser.add_argument('-c', "--config", dest="config", default=None, help="Configuration file for application. Default is syncflux.yml. " "See syncflux_example.yml for an example.") parser.add_argument("--config-dir", dest="config_dir", default=None, help="Configuration directory. All config files with .yml extension will be processed one by one.") parser.add_argument('-n', "--count", dest="count", default=1, type=int, help="Number of test runs. Default is one. Use -1 to run indefinitely.") parser.add_argument('-w', "--wait", dest="wait", default=60, type=float, help="Number of seconds between test runs.") parser.add_argument("-s", "--silent", dest='silent', action="store_true", default=False, help="Supress all messages except errors.") parser.add_argument("-v", "--verbose", dest='verbose', action="store_true", default=False, help="Be verbose." ) parser.add_argument("--halt-on-send-error", dest="halt_on_send_error", default=False, action="store_true", help="Halt when cannot send data to influxdb. 
The default is to ignore the error.") args = parser.parse_args() if args.silent and args.verbose: parser.error("Cannot use --silent and --verbose at the same time.") if args.config is None: args.config = "syncflux.yml" if (args.config is not None) and (args.config_dir is not None): parser.error("You must give either --config or --config-dir (exactly one of them)") if args.count == 0: parser.error("Test run count cannot be zero.") if args.wait <= 0: parser.error("Wait time must be positive.") if args.config: config_files = [args.config] else: config_files = [] for file_name in sorted(os.listdir(args.config_dir)): ext = os.path.splitext(file_name)[1] if ext.lower() == ".yml": fpath = os.path.join(args.config_dir, file_name) config_files.append(fpath) index = 0 while args.count < 0 or index < args.count: if args.count != 1: info("Pass #%d started" % (index + 1)) started = time.time() for config_file in config_files: if not os.path.isfile(config_file): parser.error("Cannot open %s" % config_file) config = load_app_config(open(config_file, "r")) main() elapsed = time.time() - started index += 1 last_one = (args.count > 0) and (index == args.count) if not last_one: remaining = args.wait - elapsed if remaining > 0: if not args.silent: info("Pass #%d elapsed %.2f sec, waiting %.2f sec for next." % (index, elapsed, remaining)) time.sleep(args.wait) else: info("Pass #%d elapsed %.2f sec" % (index, elapsed)) info("")
true
true
1c46086c638aabe3f56e864c3f814d7d84d20949
9,687
py
Python
lib/spack/spack/build_systems/cuda.py
varioustoxins/spack
cab0e4cb240f34891a6d753f3393e512f9a99e9a
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
lib/spack/spack/build_systems/cuda.py
varioustoxins/spack
cab0e4cb240f34891a6d753f3393e512f9a99e9a
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
4
2022-02-28T11:32:57.000Z
2022-03-02T11:37:37.000Z
lib/spack/spack/build_systems/cuda.py
varioustoxins/spack
cab0e4cb240f34891a6d753f3393e512f9a99e9a
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import spack.variant from spack.directives import conflicts, depends_on, variant from spack.multimethod import when from spack.package import PackageBase class CudaPackage(PackageBase): """Auxiliary class which contains CUDA variant, dependencies and conflicts and is meant to unify and facilitate its usage. Maintainers: ax3l, Rombur, davidbeckingsale """ # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list # https://developer.nvidia.com/cuda-gpus # https://en.wikipedia.org/wiki/CUDA#GPUs_supported cuda_arch_values = ( '10', '11', '12', '13', '20', '21', '30', '32', '35', '37', '50', '52', '53', '60', '61', '62', '70', '72', '75', '80', '86' ) # FIXME: keep cuda and cuda_arch separate to make usage easier until # Spack has depends_on(cuda, when='cuda_arch!=None') or alike variant('cuda', default=False, description='Build with CUDA') variant('cuda_arch', description='CUDA architecture', values=spack.variant.any_combination_of(*cuda_arch_values), when='+cuda') # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#nvcc-examples # https://llvm.org/docs/CompileCudaWithLLVM.html#compiling-cuda-code @staticmethod def cuda_flags(arch_list): return [('--generate-code arch=compute_{0},code=sm_{0} ' '--generate-code arch=compute_{0},code=compute_{0}').format(s) for s in arch_list] depends_on('cuda', when='+cuda') # CUDA version vs Architecture # https://en.wikipedia.org/wiki/CUDA#GPUs_supported # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features depends_on('cuda@:6.0', when='cuda_arch=10') depends_on('cuda@:6.5', when='cuda_arch=11') depends_on('cuda@2.1:6.5', when='cuda_arch=12') depends_on('cuda@2.1:6.5', when='cuda_arch=13') depends_on('cuda@3.0:8.0', when='cuda_arch=20') depends_on('cuda@3.2:8.0', 
when='cuda_arch=21') depends_on('cuda@5.0:10.2', when='cuda_arch=30') depends_on('cuda@5.0:10.2', when='cuda_arch=32') depends_on('cuda@5.0:', when='cuda_arch=35') depends_on('cuda@6.5:', when='cuda_arch=37') depends_on('cuda@6.0:', when='cuda_arch=50') depends_on('cuda@6.5:', when='cuda_arch=52') depends_on('cuda@6.5:', when='cuda_arch=53') depends_on('cuda@8.0:', when='cuda_arch=60') depends_on('cuda@8.0:', when='cuda_arch=61') depends_on('cuda@8.0:', when='cuda_arch=62') depends_on('cuda@9.0:', when='cuda_arch=70') depends_on('cuda@9.0:', when='cuda_arch=72') depends_on('cuda@10.0:', when='cuda_arch=75') depends_on('cuda@11.0:', when='cuda_arch=80') depends_on('cuda@11.1:', when='cuda_arch=86') # From the NVIDIA install guide we know of conflicts for particular # platforms (linux, darwin), architectures (x86, powerpc) and compilers # (gcc, clang). We don't restrict %gcc and %clang conflicts to # platform=linux, since they should also apply to platform=cray, and may # apply to platform=darwin. We currently do not provide conflicts for # platform=darwin with %apple-clang. # Linux x86_64 compiler conflicts from here: # https://gist.github.com/ax3l/9489132 with when('^cuda~allow-unsupported-compilers'): # GCC # According to # https://github.com/spack/spack/pull/25054#issuecomment-886531664 # these conflicts are valid independently from the architecture # minimum supported versions conflicts('%gcc@:4', when='+cuda ^cuda@11.0:') conflicts('%gcc@:5', when='+cuda ^cuda@11.4:') # maximum supported version # NOTE: # in order to not constrain future cuda version to old gcc versions, # it has been decided to use an upper bound for the latest version. # This implies that the last one in the list has to be updated at # each release of a new cuda minor version. 
conflicts('%gcc@10:', when='+cuda ^cuda@:11.0') conflicts('%gcc@12:', when='+cuda ^cuda@:11.6') conflicts('%clang@13:', when='+cuda ^cuda@:11.5') conflicts('%clang@14:', when='+cuda ^cuda@:11.6') # https://gist.github.com/ax3l/9489132#gistcomment-3860114 conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0') conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:') conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:') conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:') conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:') conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:') conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:') conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:') conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:') conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:') conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:') conflicts('%pgi@:17,20:', when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:') conflicts('%pgi@:17,21:', when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:') conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:') conflicts('%clang@:3.7,4:', when='+cuda ^cuda@8.0:9.0 target=x86_64:') conflicts('%clang@:3.7,4.1:', when='+cuda ^cuda@9.1 target=x86_64:') conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:') conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:') conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:') conflicts('%clang@:3.7,8.1:', when='+cuda ^cuda@10.1.105:10.1.243 target=x86_64:') conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89 target=x86_64:') conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=x86_64:') conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:') conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:') # x86_64 vs. 
ppc64le differ according to NVidia docs # Linux ppc64le compiler conflicts from Table from the docs below: # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html # https://docs.nvidia.com/cuda/archive/9.2/cuda-installation-guide-linux/index.html # https://docs.nvidia.com/cuda/archive/9.1/cuda-installation-guide-linux/index.html # https://docs.nvidia.com/cuda/archive/9.0/cuda-installation-guide-linux/index.html # https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html # information prior to CUDA 9 difficult to find conflicts('%gcc@6:', when='+cuda ^cuda@:9 target=ppc64le:') conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:') conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:') # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:') conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:') conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:') conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176 target=ppc64le:') conflicts('%clang@5:', when='+cuda ^cuda@:9.1 target=ppc64le:') conflicts('%clang@6:', when='+cuda ^cuda@:9.2 target=ppc64le:') conflicts('%clang@7:', when='+cuda ^cuda@10.0.130 target=ppc64le:') conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:') conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89 target=ppc64le:') conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:') conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2 target=ppc64le:') conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:') # Intel is mostly relevant for x86_64 Linux, even though it also # exists for Mac OS X. 
No information prior to CUDA 3.2 or Intel 11.1 conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1') conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:') conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:') conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:') conflicts('%intel@:14.9', when='+cuda ^cuda@7:') # Intel 15.x is compatible with CUDA 7 thru current CUDA conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43') conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60') conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9') conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0') conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1') conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0') # XL is mostly relevant for ppc64le Linux conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1') conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2') conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0') # Darwin. # TODO: add missing conflicts for %apple-clang cuda@:10 conflicts('platform=darwin', when='+cuda ^cuda@11.0.2: ') # Make sure cuda_arch can not be used without +cuda for value in cuda_arch_values: conflicts('~cuda', when='cuda_arch=' + value)
50.19171
92
0.617425
import spack.variant from spack.directives import conflicts, depends_on, variant from spack.multimethod import when from spack.package import PackageBase class CudaPackage(PackageBase): = ( '10', '11', '12', '13', '20', '21', '30', '32', '35', '37', '50', '52', '53', '60', '61', '62', '70', '72', '75', '80', '86' ) variant('cuda', default=False, description='Build with CUDA') variant('cuda_arch', description='CUDA architecture', values=spack.variant.any_combination_of(*cuda_arch_values), when='+cuda') da_flags(arch_list): return [('--generate-code arch=compute_{0},code=sm_{0} ' '--generate-code arch=compute_{0},code=compute_{0}').format(s) for s in arch_list] depends_on('cuda', when='+cuda') when='cuda_arch=10') depends_on('cuda@:6.5', when='cuda_arch=11') depends_on('cuda@2.1:6.5', when='cuda_arch=12') depends_on('cuda@2.1:6.5', when='cuda_arch=13') depends_on('cuda@3.0:8.0', when='cuda_arch=20') depends_on('cuda@3.2:8.0', when='cuda_arch=21') depends_on('cuda@5.0:10.2', when='cuda_arch=30') depends_on('cuda@5.0:10.2', when='cuda_arch=32') depends_on('cuda@5.0:', when='cuda_arch=35') depends_on('cuda@6.5:', when='cuda_arch=37') depends_on('cuda@6.0:', when='cuda_arch=50') depends_on('cuda@6.5:', when='cuda_arch=52') depends_on('cuda@6.5:', when='cuda_arch=53') depends_on('cuda@8.0:', when='cuda_arch=60') depends_on('cuda@8.0:', when='cuda_arch=61') depends_on('cuda@8.0:', when='cuda_arch=62') depends_on('cuda@9.0:', when='cuda_arch=70') depends_on('cuda@9.0:', when='cuda_arch=72') depends_on('cuda@10.0:', when='cuda_arch=75') depends_on('cuda@11.0:', when='cuda_arch=80') depends_on('cuda@11.1:', when='cuda_arch=86') # platform=linux, since they should also apply to platform=cray, and may # apply to platform=darwin. We currently do not provide conflicts for # platform=darwin with %apple-clang. 
# Linux x86_64 compiler conflicts from here: # https://gist.github.com/ax3l/9489132 with when('^cuda~allow-unsupported-compilers'): # GCC # According to # https://github.com/spack/spack/pull/25054#issuecomment-886531664 # these conflicts are valid independently from the architecture # minimum supported versions conflicts('%gcc@:4', when='+cuda ^cuda@11.0:') conflicts('%gcc@:5', when='+cuda ^cuda@11.4:') # maximum supported version # NOTE: # in order to not constrain future cuda version to old gcc versions, # it has been decided to use an upper bound for the latest version. # This implies that the last one in the list has to be updated at # each release of a new cuda minor version. conflicts('%gcc@10:', when='+cuda ^cuda@:11.0') conflicts('%gcc@12:', when='+cuda ^cuda@:11.6') conflicts('%clang@13:', when='+cuda ^cuda@:11.5') conflicts('%clang@14:', when='+cuda ^cuda@:11.6') # https://gist.github.com/ax3l/9489132#gistcomment-3860114 conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0') conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:') conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:') conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:') conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:') conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:') conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:') conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:') conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:') conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:') conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:') conflicts('%pgi@:17,20:', when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:') conflicts('%pgi@:17,21:', when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:') conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:') conflicts('%clang@:3.7,4:', when='+cuda ^cuda@8.0:9.0 target=x86_64:') conflicts('%clang@:3.7,4.1:', when='+cuda ^cuda@9.1 
target=x86_64:') conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:') conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:') conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:') conflicts('%clang@:3.7,8.1:', when='+cuda ^cuda@10.1.105:10.1.243 target=x86_64:') conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89 target=x86_64:') conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=x86_64:') conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:') conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:') # x86_64 vs. ppc64le differ according to NVidia docs # Linux ppc64le compiler conflicts from Table from the docs below: # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html # https://docs.nvidia.com/cuda/archive/9.2/cuda-installation-guide-linux/index.html # https://docs.nvidia.com/cuda/archive/9.1/cuda-installation-guide-linux/index.html # https://docs.nvidia.com/cuda/archive/9.0/cuda-installation-guide-linux/index.html # https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html # information prior to CUDA 9 difficult to find conflicts('%gcc@6:', when='+cuda ^cuda@:9 target=ppc64le:') conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:') conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:') # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:') conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:') conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:') conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176 target=ppc64le:') conflicts('%clang@5:', when='+cuda ^cuda@:9.1 target=ppc64le:') conflicts('%clang@6:', when='+cuda ^cuda@:9.2 target=ppc64le:') conflicts('%clang@7:', when='+cuda ^cuda@10.0.130 target=ppc64le:') conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:') conflicts('%clang@8.1:', when='+cuda 
^cuda@:10.2.89 target=ppc64le:') conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:') conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2 target=ppc64le:') conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:') # Intel is mostly relevant for x86_64 Linux, even though it also # exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1 conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1') conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:') conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:') conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:') conflicts('%intel@:14.9', when='+cuda ^cuda@7:') # Intel 15.x is compatible with CUDA 7 thru current CUDA conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43') conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60') conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9') conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0') conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1') conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0') # XL is mostly relevant for ppc64le Linux conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1') conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2') conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0') # Darwin. # TODO: add missing conflicts for %apple-clang cuda@:10 conflicts('platform=darwin', when='+cuda ^cuda@11.0.2: ') # Make sure cuda_arch can not be used without +cuda for value in cuda_arch_values: conflicts('~cuda', when='cuda_arch=' + value)
true
true
1c460873137198c0c9f2771d470c8e55b0d5da3b
33
py
Python
step2.py
SirLonsevrot/Lesson_20.11.28
eac91b46441bf641c60d7f5d1340d74f8665614b
[ "Apache-2.0" ]
null
null
null
step2.py
SirLonsevrot/Lesson_20.11.28
eac91b46441bf641c60d7f5d1340d74f8665614b
[ "Apache-2.0" ]
null
null
null
step2.py
SirLonsevrot/Lesson_20.11.28
eac91b46441bf641c60d7f5d1340d74f8665614b
[ "Apache-2.0" ]
null
null
null
print('Георгий ничего не умеет')
16.5
32
0.757576
print('Георгий ничего не умеет')
true
true
1c4608abdf6f6b3a4ea765ffc6252d1d214de3d1
6,003
py
Python
pro/views.py
iyerikuzwe/Award
a5ac352a7d05d23c92167022e00648caeab62590
[ "Unlicense" ]
null
null
null
pro/views.py
iyerikuzwe/Award
a5ac352a7d05d23c92167022e00648caeab62590
[ "Unlicense" ]
null
null
null
pro/views.py
iyerikuzwe/Award
a5ac352a7d05d23c92167022e00648caeab62590
[ "Unlicense" ]
null
null
null
from django.shortcuts import render, redirect, get_object_or_404 from django.http import HttpResponse from django.contrib.auth.decorators import login_required from .forms import ProjectForm, ProfileForm, DesignForm, ContentForm, UsabilityForm from .models import Project, Profile from django.contrib.auth.models import User from rest_framework.response import Response from rest_framework.views import APIView # from .serializer import ProfSerializer, ProjectSerializer from .permissions import IsAuthenticatedOrReadOnly from rest_framework import status @login_required(login_url='/accounts/login/') def index(request): projects = Project.objects.all().order_by('-posted_on') form = DesignForm() form = UsabilityForm() form = ContentForm() return render(request, 'index.html', locals()) @login_required(login_url='/accounts/login/') def new_project(request): """ Function that enables one to upload projects """ profile = Profile.objects.all() for profile in profile: if request.method == 'POST': form = ProjectForm(request.POST, request.FILES) if form.is_valid(): pro = form.save(commit=False) pro.profile = profile pro.user = request.user pro.save() return redirect('landing') else: form = ProjectForm() return render(request, 'new_pro.html', {"form": form}) @login_required(login_url='/accounts/login/') def edit_profile(request): """ Function that enables one to edit their profile information """ current_user = request.user profile = Profile.objects.get(user=request.user) if request.method == 'POST': form = ProfileForm(request.POST, request.FILES) if form.is_valid(): profile = form.save(commit=False) profile.user = current_user profile.save() return redirect('landing') else: form = ProfileForm() return render(request, 'profile/edit-profile.html', {"form": form,}) @login_required(login_url='/accounts/login/') def view_project(request, id): """ Function that enables one to view specific project """ title = "View Project" project = Project.get_pro_by_id(id=id) return render(request, 
'view_project.html', locals()) @login_required(login_url='/accounts/login/') def profile(request, user_id): """ Function that enables one to see their profile """ title = "Profile" pros= Project.get_pro_by_user(id= user_id).order_by('-posted_on') profiles = Profile.objects.get(user_id=user_id) users = User.objects.get(id=user_id) return render(request, 'profile/profile.html', locals()) def search_results(request): if 'pro' in request.GET and request.GET["pro"]: search_term = request.GET.get("pro") searched_projects = Project.search_by_title(search_term) message = f"{search_term}" return render(request, 'search.html',{"message":message,"pros": searched_projects}) else: message = "You didn't searched for any term" return render(request, 'search.html',{"message":message}) class ProfList(APIView): permission_classes = (IsAuthenticatedOrReadOnly,) def get(self, request, format=None): all_merchprof = Profile.objects.all() serializers = ProfSerializer(all_merchprof, many=True) return Response(serializers.data) def post(self, request, format=None): serializers = ProfSerializer(data=request.data) if serializers.is_valid(): serializers.save() return Response(serializers.data, status=status.HTTP_201_CREATED) return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST) class ProjectList(APIView): permission_classes = (IsAuthenticatedOrReadOnly,) def get(self, request, format=None): all_merchproj = Project.objects.all() serializers = ProjectSerializer(all_merchproj, many=True) return Response(serializers.data) def post(self, request, format=None): serializers = ProjectSerializer(data=request.data) if serializers.is_valid(): serializers.save() return Response(serializers.data, status=status.HTTP_201_CREATED) return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST) @login_required(login_url='/accounts/login/') def add_design(request, id): project = get_object_or_404(Project, pk=id) if request.method == 'POST': form = DesignForm(request.POST) if 
form.is_valid(): rate = form.save(commit=False) rate.project = project rate.user_name = request.user rate.profile = request.user.profile rate.save() return redirect('landing') else: form = DesignForm() return render(request, 'index.html',{'form': form}) @login_required(login_url='/accounts/login/') def add_usability(request, id): project = get_object_or_404(Project, pk=id) if request.method == 'POST': form = UsabilityForm(request.POST) if form.is_valid(): rate = form.save(commit=False) rate.project = project rate.user_name = request.user rate.profile = request.user.profile rate.save() return redirect('landing') else: form = UsabilityForm() return render(request, 'index.html',{'form': form}) @login_required(login_url='/accounts/login/') def add_content(request, id): project = get_object_or_404(Project, pk=id) if request.method == 'POST': form = ContentForm(request.POST) if form.is_valid(): rate = form.save(commit=False) rate.project = project rate.user_name = request.user rate.profile = request.user.profile rate.save() return redirect('landing') else: form = ContentForm() return render(request, 'index.html',{'form': form})
33.35
91
0.666167
from django.shortcuts import render, redirect, get_object_or_404 from django.http import HttpResponse from django.contrib.auth.decorators import login_required from .forms import ProjectForm, ProfileForm, DesignForm, ContentForm, UsabilityForm from .models import Project, Profile from django.contrib.auth.models import User from rest_framework.response import Response from rest_framework.views import APIView from .permissions import IsAuthenticatedOrReadOnly from rest_framework import status @login_required(login_url='/accounts/login/') def index(request): projects = Project.objects.all().order_by('-posted_on') form = DesignForm() form = UsabilityForm() form = ContentForm() return render(request, 'index.html', locals()) @login_required(login_url='/accounts/login/') def new_project(request): profile = Profile.objects.all() for profile in profile: if request.method == 'POST': form = ProjectForm(request.POST, request.FILES) if form.is_valid(): pro = form.save(commit=False) pro.profile = profile pro.user = request.user pro.save() return redirect('landing') else: form = ProjectForm() return render(request, 'new_pro.html', {"form": form}) @login_required(login_url='/accounts/login/') def edit_profile(request): current_user = request.user profile = Profile.objects.get(user=request.user) if request.method == 'POST': form = ProfileForm(request.POST, request.FILES) if form.is_valid(): profile = form.save(commit=False) profile.user = current_user profile.save() return redirect('landing') else: form = ProfileForm() return render(request, 'profile/edit-profile.html', {"form": form,}) @login_required(login_url='/accounts/login/') def view_project(request, id): title = "View Project" project = Project.get_pro_by_id(id=id) return render(request, 'view_project.html', locals()) @login_required(login_url='/accounts/login/') def profile(request, user_id): title = "Profile" pros= Project.get_pro_by_user(id= user_id).order_by('-posted_on') profiles = Profile.objects.get(user_id=user_id) 
users = User.objects.get(id=user_id) return render(request, 'profile/profile.html', locals()) def search_results(request): if 'pro' in request.GET and request.GET["pro"]: search_term = request.GET.get("pro") searched_projects = Project.search_by_title(search_term) message = f"{search_term}" return render(request, 'search.html',{"message":message,"pros": searched_projects}) else: message = "You didn't searched for any term" return render(request, 'search.html',{"message":message}) class ProfList(APIView): permission_classes = (IsAuthenticatedOrReadOnly,) def get(self, request, format=None): all_merchprof = Profile.objects.all() serializers = ProfSerializer(all_merchprof, many=True) return Response(serializers.data) def post(self, request, format=None): serializers = ProfSerializer(data=request.data) if serializers.is_valid(): serializers.save() return Response(serializers.data, status=status.HTTP_201_CREATED) return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST) class ProjectList(APIView): permission_classes = (IsAuthenticatedOrReadOnly,) def get(self, request, format=None): all_merchproj = Project.objects.all() serializers = ProjectSerializer(all_merchproj, many=True) return Response(serializers.data) def post(self, request, format=None): serializers = ProjectSerializer(data=request.data) if serializers.is_valid(): serializers.save() return Response(serializers.data, status=status.HTTP_201_CREATED) return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST) @login_required(login_url='/accounts/login/') def add_design(request, id): project = get_object_or_404(Project, pk=id) if request.method == 'POST': form = DesignForm(request.POST) if form.is_valid(): rate = form.save(commit=False) rate.project = project rate.user_name = request.user rate.profile = request.user.profile rate.save() return redirect('landing') else: form = DesignForm() return render(request, 'index.html',{'form': form}) @login_required(login_url='/accounts/login/') def 
add_usability(request, id): project = get_object_or_404(Project, pk=id) if request.method == 'POST': form = UsabilityForm(request.POST) if form.is_valid(): rate = form.save(commit=False) rate.project = project rate.user_name = request.user rate.profile = request.user.profile rate.save() return redirect('landing') else: form = UsabilityForm() return render(request, 'index.html',{'form': form}) @login_required(login_url='/accounts/login/') def add_content(request, id): project = get_object_or_404(Project, pk=id) if request.method == 'POST': form = ContentForm(request.POST) if form.is_valid(): rate = form.save(commit=False) rate.project = project rate.user_name = request.user rate.profile = request.user.profile rate.save() return redirect('landing') else: form = ContentForm() return render(request, 'index.html',{'form': form})
true
true