hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f724bf3443b15e4d6d7686f56cce4260cbc558a0 | 460 | py | Python | fma/analyze.py | adipasquale/frontmatter-analysis | 068b8870ee35569a81600f637569ad589087e2a8 | [
"MIT"
] | null | null | null | fma/analyze.py | adipasquale/frontmatter-analysis | 068b8870ee35569a81600f637569ad589087e2a8 | [
"MIT"
] | null | null | null | fma/analyze.py | adipasquale/frontmatter-analysis | 068b8870ee35569a81600f637569ad589087e2a8 | [
"MIT"
] | null | null | null | import pandas as pd
import os
from pathlib import Path
import frontmatter
import argparse
if __name__ == "__main__":
    # Collect the YAML front matter of every Markdown file directly inside
    # the given directory and print per-key summary statistics.
    cli = argparse.ArgumentParser()
    cli.add_argument("path", help="path containing .md files")
    options = cli.parse_args()
    records = [frontmatter.load(md_file).metadata
               for md_file in Path(options.path).glob('*.md')]
    frame = pd.DataFrame(records)
    # Widen the console output so the transposed table is not wrapped.
    with pd.option_context('display.width', 100):
        print(frame.describe().transpose())
| 28.75 | 85 | 0.702174 | import pandas as pd
import os
from pathlib import Path
import frontmatter
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("path", help="path containing .md files")
args = parser.parse_args()
data = [frontmatter.load(path).metadata for path in Path(args.path).glob('*.md')]
df = pd.DataFrame(data)
with pd.option_context('display.width', 100):
print(df.describe().transpose())
| true | true |
f724bf5a11fa8d7fe68f54b4735fe3d897c56f22 | 499 | py | Python | python2/main.py | miptleha/bubble-sort | bae2212e80333de343e85c72c0ceef17136e88d5 | [
"MIT"
] | null | null | null | python2/main.py | miptleha/bubble-sort | bae2212e80333de343e85c72c0ceef17136e88d5 | [
"MIT"
] | null | null | null | python2/main.py | miptleha/bubble-sort | bae2212e80333de343e85c72c0ceef17136e88d5 | [
"MIT"
] | null | null | null | import time
def getMaxSubSum(a):
    """Return the maximum sum of a contiguous subarray of *a* (Kadane's scan).

    The empty subarray is allowed, so the result is never negative (an
    all-negative input yields 0).

    Bug fix: the original iterated ``range(0, n)`` over the module-level
    global ``n`` instead of ``len(a)``, so it crashed or computed the wrong
    answer for any list whose length differed from ``n``.

    :param a: sequence of numbers
    :return: largest contiguous-subarray sum, or 0 for an empty/all-negative input
    """
    best = 0
    running = 0
    for value in a:
        running += value
        if running > best:
            best = running
        if running < 0:
            # A negative prefix can never help a later subarray; restart.
            running = 0
    return best
# Build the alternating-sign test vector: 0, -1, 2, -3, 4, ...
n = 10000
a = [(-1) ** idx * idx for idx in range(n)]

# Time n rounds of "perturb the first element, then solve" on the same array.
start = time.perf_counter()
res = 0
for _ in range(n):
    a[0] += 1
    res += getMaxSubSum(a)
end = time.perf_counter()
print("{:.5f}".format(end - start), "seconds") | 17.206897 | 46 | 0.478958 | import time
def getMaxSubSum(a):
s = 0
s1 = s
for i in range(0, n):
s += a[i]
s1 = max(s1, s)
if (s < 0):
s = 0;
return s1
n = 10000
a = []
for i in range(0, n):
a.append(pow(-1, i) * i)
start = time.perf_counter()
res = 0;
for i in range(0, n):
a[0] += 1
res += getMaxSubSum(a)
end = time.perf_counter()
print("{:.5f}".format(end - start), "seconds") | true | true |
f724bfa6d406a2deee8665a8e2f1df9aceed69c7 | 15,613 | py | Python | fhirclient/r4models/chargeitemdefinition.py | cspears-mitre/CapStatement | 2390566ed75d420e0615e3a0aacb77e8c030fdcc | [
"Apache-2.0"
] | 1 | 2021-12-24T11:14:38.000Z | 2021-12-24T11:14:38.000Z | fhirclient/r4models/chargeitemdefinition.py | cspears-mitre/CapStatement | 2390566ed75d420e0615e3a0aacb77e8c030fdcc | [
"Apache-2.0"
] | null | null | null | fhirclient/r4models/chargeitemdefinition.py | cspears-mitre/CapStatement | 2390566ed75d420e0615e3a0aacb77e8c030fdcc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 (http://hl7.org/fhir/StructureDefinition/ChargeItemDefinition) on 2018-12-20.
# 2018, SMART Health IT.
from . import domainresource
class ChargeItemDefinition(domainresource.DomainResource):
    """ Definition of properties and rules about how the price and the
    applicability of a ChargeItem can be determined.

    The ChargeItemDefinition resource provides the properties that apply to the
    (billing) codes necessary to calculate costs and prices. The properties may
    differ largely depending on type and realm, therefore this resource gives
    only a rough structure and requires profiling for each type of billing code
    system.
    """

    resource_type = "ChargeItemDefinition"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        Every FHIR element is initialized to ``None``; population happens in
        the superclass from *jsondict*.  Each ``_name`` sibling attribute
        carries the FHIR primitive extension for the matching ``name`` field.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.applicability = None
        """ Whether or not the billing code is applicable.
        List of `ChargeItemDefinitionApplicability` items (represented as `dict` in JSON). """

        self.approvalDate = None
        """ When the charge item definition was approved by publisher.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.code = None
        """ Billing codes or product types this definition applies to.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.contact = None
        """ Contact details for the publisher.
        List of `ContactDetail` items (represented as `dict` in JSON). """

        self.copyright = None
        """ Use and/or publishing restrictions.
        Type `str`. """

        self._copyright = None
        """ extension for fhir primitive copyright"""

        self.date = None
        """ Date last changed.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.derivedFromUri = None
        """ Underlying externally-defined charge item definition.
        List of `str` items. """

        self._derivedFromUri = None
        """ extension for fhir primitive derivedFromUri"""

        self.description = None
        """ Natural language description of the charge item definition.
        Type `str`. """

        self._description = None
        """ extension for fhir primitive description"""

        self.effectivePeriod = None
        """ When the charge item definition is expected to be used.
        Type `Period` (represented as `dict` in JSON). """

        self.experimental = None
        """ For testing purposes, not real usage.
        Type `bool`. """

        self._experimental = None
        """ extension for fhir primitive experimental"""

        self.identifier = None
        """ Additional identifier for the charge item definition.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.instance = None
        """ Instances this definition applies to.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.jurisdiction = None
        """ Intended jurisdiction for charge item definition (if applicable).
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.lastReviewDate = None
        """ When the charge item definition was last reviewed.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.partOf = None
        """ A larger definition of which this particular definition is a
        component or step.
        List of `str` items. """

        self._partOf = None
        """ extension for fhir primitive partOf"""

        self.propertyGroup = None
        """ Group of properties which are applicable under the same conditions.
        List of `ChargeItemDefinitionPropertyGroup` items (represented as `dict` in JSON). """

        self.publisher = None
        """ Name of the publisher (organization or individual).
        Type `str`. """

        self._publisher = None
        """ extension for fhir primitive publisher"""

        self.replaces = None
        """ Completed or terminated request(s) whose function is taken by this
        new request.
        List of `str` items. """

        self._replaces = None
        """ extension for fhir primitive replaces"""

        self.status = None
        """ draft | active | retired | unknown.
        Type `str`. """

        self._status = None
        """ extension for fhir primitive status"""

        self.title = None
        """ Name for this charge item definition (human friendly).
        Type `str`. """

        self._title = None
        """ extension for fhir primitive title"""

        self.url = None
        """ Canonical identifier for this charge item definition, represented
        as a URI (globally unique).
        Type `str`. """

        self._url = None
        """ extension for fhir primitive url"""

        self.useContext = None
        """ The context that the content is intended to support.
        List of `UsageContext` items (represented as `dict` in JSON). """

        self.version = None
        """ Business version of the charge item definition.
        Type `str`. """

        self._version = None
        """ extension for fhir primitive version"""

        super(ChargeItemDefinition, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ChargeItemDefinition, self).elementProperties()
        # Tuple layout presumably follows the fhirclient convention:
        # (name, json_name, type, is_list, of_many, not_optional)
        # -- TODO confirm against FHIRAbstractBase.elementProperties().
        js.extend([
            ("applicability", "applicability", ChargeItemDefinitionApplicability, True, None, False),
            ("approvalDate", "approvalDate", fhirdate.FHIRDate, False, None, False),
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("contact", "contact", contactdetail.ContactDetail, True, None, False),
            ("copyright", "copyright", str, False, None, False),
            ("_copyright", "_copyright",fhirprimitive.FHIRPrimitive, False, None, False),
            ("date", "date", fhirdate.FHIRDate, False, None, False),
            ("derivedFromUri", "derivedFromUri", str, True, None, False),
            ("_derivedFromUri", "_derivedFromUri",fhirprimitive.FHIRPrimitive, False, None, False),
            ("description", "description", str, False, None, False),
            ("_description", "_description",fhirprimitive.FHIRPrimitive, False, None, False),
            ("effectivePeriod", "effectivePeriod", period.Period, False, None, False),
            ("experimental", "experimental", bool, False, None, False),
            ("_experimental", "_experimental",fhirprimitive.FHIRPrimitive, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("instance", "instance", fhirreference.FHIRReference, True, None, False),
            ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
            ("lastReviewDate", "lastReviewDate", fhirdate.FHIRDate, False, None, False),
            ("partOf", "partOf", str, True, None, False),
            ("_partOf", "_partOf",fhirprimitive.FHIRPrimitive, False, None, False),
            ("propertyGroup", "propertyGroup", ChargeItemDefinitionPropertyGroup, True, None, False),
            ("publisher", "publisher", str, False, None, False),
            ("_publisher", "_publisher",fhirprimitive.FHIRPrimitive, False, None, False),
            ("replaces", "replaces", str, True, None, False),
            ("_replaces", "_replaces",fhirprimitive.FHIRPrimitive, False, None, False),
            ("status", "status", str, False, None, True),
            ("_status", "_status",fhirprimitive.FHIRPrimitive, False, None, False),
            ("title", "title", str, False, None, False),
            ("_title", "_title",fhirprimitive.FHIRPrimitive, False, None, False),
            ("url", "url", str, False, None, True),
            ("_url", "_url",fhirprimitive.FHIRPrimitive, False, None, False),
            ("useContext", "useContext", usagecontext.UsageContext, True, None, False),
            ("version", "version", str, False, None, False),
            ("_version", "_version",fhirprimitive.FHIRPrimitive, False, None, False),
        ])
        return js
from . import backboneelement
class ChargeItemDefinitionApplicability(backboneelement.BackboneElement):
    """ Whether or not the billing code is applicable.

    Expressions that describe applicability criteria for the billing code.
    """

    resource_type = "ChargeItemDefinitionApplicability"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        Each ``_name`` sibling attribute carries the FHIR primitive
        extension for the matching ``name`` field.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.description = None
        """ Natural language description of the condition.
        Type `str`. """

        self._description = None
        """ extension for fhir primitive description"""

        self.expression = None
        """ Boolean-valued expression.
        Type `str`. """

        self._expression = None
        """ extension for fhir primitive expression"""

        self.language = None
        """ Language of the expression.
        Type `str`. """

        self._language = None
        """ extension for fhir primitive language"""

        super(ChargeItemDefinitionApplicability, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ChargeItemDefinitionApplicability, self).elementProperties()
        # Tuple layout presumably (name, json_name, type, is_list, of_many,
        # not_optional) per fhirclient convention -- TODO confirm.
        js.extend([
            ("description", "description", str, False, None, False),
            ("_description", "_description",fhirprimitive.FHIRPrimitive, False, None, False),
            ("expression", "expression", str, False, None, False),
            ("_expression", "_expression",fhirprimitive.FHIRPrimitive, False, None, False),
            ("language", "language", str, False, None, False),
            ("_language", "_language",fhirprimitive.FHIRPrimitive, False, None, False),
        ])
        return js
class ChargeItemDefinitionPropertyGroup(backboneelement.BackboneElement):
    """ Group of properties which are applicable under the same conditions.

    Group of properties which are applicable under the same conditions. If no
    applicability rules are established for the group, then all properties
    always apply.
    """

    resource_type = "ChargeItemDefinitionPropertyGroup"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.applicability = None
        """ Conditions under which the priceComponent is applicable.
        List of `ChargeItemDefinitionApplicability` items (represented as `dict` in JSON). """

        self.priceComponent = None
        """ Components of total line item price.
        List of `ChargeItemDefinitionPropertyGroupPriceComponent` items (represented as `dict` in JSON). """

        super(ChargeItemDefinitionPropertyGroup, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ChargeItemDefinitionPropertyGroup, self).elementProperties()
        # Tuple layout presumably (name, json_name, type, is_list, of_many,
        # not_optional) per fhirclient convention -- TODO confirm.
        js.extend([
            ("applicability", "applicability", ChargeItemDefinitionApplicability, True, None, False),
            ("priceComponent", "priceComponent", ChargeItemDefinitionPropertyGroupPriceComponent, True, None, False),
        ])
        return js
class ChargeItemDefinitionPropertyGroupPriceComponent(backboneelement.BackboneElement):
    """ Components of total line item price.

    The price for a ChargeItem may be calculated as a base price with
    surcharges/deductions that apply in certain conditions. A
    ChargeItemDefinition resource that defines the prices, factors and
    conditions that apply to a billing code is currently under development.
    The priceComponent element can be used to offer transparency to the
    recipient of the Invoice of how the prices have been calculated.
    """

    resource_type = "ChargeItemDefinitionPropertyGroupPriceComponent"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        Each ``_name`` sibling attribute carries the FHIR primitive
        extension for the matching ``name`` field.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.amount = None
        """ Monetary amount associated with this component.
        Type `Money` (represented as `dict` in JSON). """

        self.code = None
        """ Code identifying the specific component.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.factor = None
        """ Factor used for calculating this component.
        Type `float`. """

        self._factor = None
        """ extension for fhir primitive factor"""

        self.type = None
        """ base | surcharge | deduction | discount | tax | informational.
        Type `str`. """

        self._type = None
        """ extension for fhir primitive type"""

        super(ChargeItemDefinitionPropertyGroupPriceComponent, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ChargeItemDefinitionPropertyGroupPriceComponent, self).elementProperties()
        # Tuple layout presumably (name, json_name, type, is_list, of_many,
        # not_optional) per fhirclient convention -- TODO confirm.
        js.extend([
            ("amount", "amount", money.Money, False, None, False),
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("factor", "factor", float, False, None, False),
            ("_factor", "_factor",fhirprimitive.FHIRPrimitive, False, None, False),
            ("type", "type", str, False, None, True),
            ("_type", "_type",fhirprimitive.FHIRPrimitive, False, None, False),
        ])
        return js
from . import codeableconcept
from . import contactdetail
from . import fhirdate
from . import fhirreference
from . import identifier
from . import money
from . import period
from . import usagecontext
from . import fhirprimitive
| 38.173594 | 117 | 0.609172 |
from . import domainresource
class ChargeItemDefinition(domainresource.DomainResource):
resource_type = "ChargeItemDefinition"
def __init__(self, jsondict=None, strict=True):
self.applicability = None
self.approvalDate = None
self.code = None
self.contact = None
self.copyright = None
self._copyright = None
self.date = None
self.derivedFromUri = None
self._derivedFromUri = None
self.description = None
self._description = None
self.effectivePeriod = None
self.experimental = None
self._experimental = None
self.identifier = None
self.instance = None
self.jurisdiction = None
self.lastReviewDate = None
self.partOf = None
self._partOf = None
self.propertyGroup = None
self.publisher = None
self._publisher = None
self.replaces = None
self._replaces = None
self.status = None
self._status = None
self.title = None
self._title = None
self.url = None
self._url = None
self.useContext = None
self.version = None
self._version = None
super(ChargeItemDefinition, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ChargeItemDefinition, self).elementProperties()
js.extend([
("applicability", "applicability", ChargeItemDefinitionApplicability, True, None, False),
("approvalDate", "approvalDate", fhirdate.FHIRDate, False, None, False),
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("copyright", "copyright", str, False, None, False),
("_copyright", "_copyright",fhirprimitive.FHIRPrimitive, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("derivedFromUri", "derivedFromUri", str, True, None, False),
("_derivedFromUri", "_derivedFromUri",fhirprimitive.FHIRPrimitive, False, None, False),
("description", "description", str, False, None, False),
("_description", "_description",fhirprimitive.FHIRPrimitive, False, None, False),
("effectivePeriod", "effectivePeriod", period.Period, False, None, False),
("experimental", "experimental", bool, False, None, False),
("_experimental", "_experimental",fhirprimitive.FHIRPrimitive, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("instance", "instance", fhirreference.FHIRReference, True, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("lastReviewDate", "lastReviewDate", fhirdate.FHIRDate, False, None, False),
("partOf", "partOf", str, True, None, False),
("_partOf", "_partOf",fhirprimitive.FHIRPrimitive, False, None, False),
("propertyGroup", "propertyGroup", ChargeItemDefinitionPropertyGroup, True, None, False),
("publisher", "publisher", str, False, None, False),
("_publisher", "_publisher",fhirprimitive.FHIRPrimitive, False, None, False),
("replaces", "replaces", str, True, None, False),
("_replaces", "_replaces",fhirprimitive.FHIRPrimitive, False, None, False),
("status", "status", str, False, None, True),
("_status", "_status",fhirprimitive.FHIRPrimitive, False, None, False),
("title", "title", str, False, None, False),
("_title", "_title",fhirprimitive.FHIRPrimitive, False, None, False),
("url", "url", str, False, None, True),
("_url", "_url",fhirprimitive.FHIRPrimitive, False, None, False),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
("_version", "_version",fhirprimitive.FHIRPrimitive, False, None, False),
])
return js
from . import backboneelement
class ChargeItemDefinitionApplicability(backboneelement.BackboneElement):
resource_type = "ChargeItemDefinitionApplicability"
def __init__(self, jsondict=None, strict=True):
self.description = None
self._description = None
self.expression = None
self._expression = None
self.language = None
self._language = None
super(ChargeItemDefinitionApplicability, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ChargeItemDefinitionApplicability, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("_description", "_description",fhirprimitive.FHIRPrimitive, False, None, False),
("expression", "expression", str, False, None, False),
("_expression", "_expression",fhirprimitive.FHIRPrimitive, False, None, False),
("language", "language", str, False, None, False),
("_language", "_language",fhirprimitive.FHIRPrimitive, False, None, False),
])
return js
class ChargeItemDefinitionPropertyGroup(backboneelement.BackboneElement):
resource_type = "ChargeItemDefinitionPropertyGroup"
def __init__(self, jsondict=None, strict=True):
self.applicability = None
self.priceComponent = None
super(ChargeItemDefinitionPropertyGroup, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ChargeItemDefinitionPropertyGroup, self).elementProperties()
js.extend([
("applicability", "applicability", ChargeItemDefinitionApplicability, True, None, False),
("priceComponent", "priceComponent", ChargeItemDefinitionPropertyGroupPriceComponent, True, None, False),
])
return js
class ChargeItemDefinitionPropertyGroupPriceComponent(backboneelement.BackboneElement):
resource_type = "ChargeItemDefinitionPropertyGroupPriceComponent"
def __init__(self, jsondict=None, strict=True):
self.amount = None
self.code = None
self.factor = None
self._factor = None
self.type = None
self._type = None
super(ChargeItemDefinitionPropertyGroupPriceComponent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ChargeItemDefinitionPropertyGroupPriceComponent, self).elementProperties()
js.extend([
("amount", "amount", money.Money, False, None, False),
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("factor", "factor", float, False, None, False),
("_factor", "_factor",fhirprimitive.FHIRPrimitive, False, None, False),
("type", "type", str, False, None, True),
("_type", "_type",fhirprimitive.FHIRPrimitive, False, None, False),
])
return js
from . import codeableconcept
from . import contactdetail
from . import fhirdate
from . import fhirreference
from . import identifier
from . import money
from . import period
from . import usagecontext
from . import fhirprimitive
| true | true |
f724bfd76f144d7fb329df62913b3c4cd7da8450 | 1,114 | py | Python | app.py | GonzalezGise/CaC-Python-Grupo-10-2167 | e6e822ba17f9d2110ff41c2520f3b06a764ac0ed | [
"MIT"
] | 1 | 2021-12-03T16:10:27.000Z | 2021-12-03T16:10:27.000Z | app.py | GonzalezGise/CaC-Python-Grupo-10-2167 | e6e822ba17f9d2110ff41c2520f3b06a764ac0ed | [
"MIT"
] | null | null | null | app.py | GonzalezGise/CaC-Python-Grupo-10-2167 | e6e822ba17f9d2110ff41c2520f3b06a764ac0ed | [
"MIT"
] | 5 | 2021-11-15T23:30:05.000Z | 2021-11-30T13:10:59.000Z | # Crear una funcion que permita ingresar al usuario
# Numero enteros... y strings...
# 1- print -> imprime la lista que su fue cargando hasta el momento...
# 2- append a -> siendo a numero entero
# 3- remove b -> siendo b numero entero
# 4- sort
# 5- reverse
# 6- insert c d -> siendo ambos numeros enteros c le indice y d el valor
# 7- exit -> termina el programa
# Interactive integer-list editor.  Supported commands (per the menu
# documented above): print, append <a>, remove <b>, sort, reverse,
# insert <index> <value>, exit.
isRunning = True
myList = []
while isRunning:
    userInput = input("Ingrese comando: ")
    command = userInput.split()
    if not command:
        # A blank line used to raise IndexError on command[0]; ignore it.
        continue
    if command[0] == "exit":
        isRunning = False
    elif command[0] == "append":
        # Only append when the argument exists and is a non-negative integer.
        if len(command) > 1 and command[1].isdigit():
            myList.append(int(command[1]))
    elif command[0] == "remove":
        # Documented in the menu but previously unimplemented: drop the first
        # occurrence of the value; absent values are silently ignored.
        if len(command) > 1 and command[1].isdigit() and int(command[1]) in myList:
            myList.remove(int(command[1]))
    elif command[0] == "print":
        print(myList)
    elif command[0] == "sort":
        myList.sort()
    elif command[0] == "reverse":
        # Also documented in the menu but previously missing.
        myList.reverse()
    elif command[0] == "insert":
        # Validate both arguments before inserting; the original crashed
        # with ValueError/IndexError on missing or non-numeric input.
        if len(command) > 2 and command[1].isdigit() and command[2].isdigit():
            myList.insert(int(command[1]), int(command[2]))

# In JavaScript this would be an anonymous arrow function; in Python a named
# `def` is preferred over binding a lambda to a name (PEP 8, E731).
def myFuncion(x):
    return x ** 2
isRunning = True
myList = []
while isRunning:
userInput = input("Ingrese comando: ")
command = userInput.split()
if command[0] == "exit":
isRunning = False
elif command[0] == "append":
argumentos = command[1]
if argumentos.isdigit():
myList.append(int(argumentos))
elif command[0] == "print":
print(myList)
elif command[0] == "sort":
myList.sort()
elif command[0] == "insert":
myList.insert(int(command[1]),int(command[2]))
myFuncion = lambda x: x**2 | true | true |
f724c14a74f53741ea1f5af11f5d2c8219bed97c | 2,073 | py | Python | contrib/devtools/check-doc.py | deyoonoo/bendos | 5e161bda7006ccc78233415ac3881fde523a3fe6 | [
"MIT"
] | null | null | null | contrib/devtools/check-doc.py | deyoonoo/bendos | 5e161bda7006ccc78233415ac3881fde523a3fe6 | [
"MIT"
] | null | null | null | contrib/devtools/check-doc.py | deyoonoo/bendos | 5e161bda7006ccc78233415ac3881fde523a3fe6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' {} | grep -v '{}'".format(CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' {}".format(CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizebdsamount'])
def main():
    """Diff command-line args used in the tree against documented ones.

    Greps the source for argument reads and for HelpMessageOpt entries,
    prints the set differences, and exits with the number of undocumented
    arguments (so exit status 0 means every used arg is documented).
    """
    used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True)
    docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True)
    # REGEX_ARG / REGEX_DOC are already compiled patterns; the original's
    # re.compile() re-wrap was a redundant no-op, so call findall directly.
    args_used = set(REGEX_ARG.findall(used))
    args_docd = set(REGEX_DOC.findall(docd)).union(SET_DOC_OPTIONAL)
    args_need_doc = args_used.difference(args_docd)
    args_unknown = args_docd.difference(args_used)
    print("Args used : {}".format(len(args_used)))
    print("Args documented : {}".format(len(args_docd)))
    print("Args undocumented: {}".format(len(args_need_doc)))
    print(args_need_doc)
    print("Args unknown : {}".format(len(args_unknown)))
    print(args_unknown)
    sys.exit(len(args_need_doc))

if __name__ == "__main__":
    main()
| 43.1875 | 298 | 0.687892 |
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' {} | grep -v '{}'".format(CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' {}".format(CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizebdsamount'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True)
docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True)
args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
sys.exit(len(args_need_doc))
if __name__ == "__main__":
main()
| true | true |
f724c1f56a74845c221aa7a44ad661b8138463aa | 3,255 | py | Python | pkg/core/core.py | GarrettHaley/ida-cfp | b1fdef053c71b4cb6508291990fc01d95ad4895e | [
"MIT"
] | null | null | null | pkg/core/core.py | GarrettHaley/ida-cfp | b1fdef053c71b4cb6508291990fc01d95ad4895e | [
"MIT"
] | null | null | null | pkg/core/core.py | GarrettHaley/ida-cfp | b1fdef053c71b4cb6508291990fc01d95ad4895e | [
"MIT"
] | 1 | 2020-05-14T20:04:31.000Z | 2020-05-14T20:04:31.000Z | """
Defines `Core`.
Instantiates the module-level logger with the appropriate naming
convention.
"""
import logging
from abc import ABC
from interface.interface import Interface
from astparser.astparser import AstParser
from record.record import Record
from exception.exception import NoFilesSpecifiedError
LOGGER = logging.getLogger(__name__)
class Core(ABC):
    """Facade over IDA-CFP's three main features.

    Administers one long-lived `Interface` (high-level file I/O) and one
    long-lived `AstParser` (PycParser AST processing); both persist across
    every file handled by a single `Core` instance.  Third-party code can
    reach the two helpers directly, but that is discouraged since neither
    keeps strictly immutable state.
    """

    def __init__(self) -> None:
        """Create the `Interface` and `AstParser` helpers once.

        Unlike `AstParser`'s per-file reset, these two objects keep their
        internal state between the files that are processed.

        :return: returns nothing
        """
        self._intr = Interface()
        self._astp = AstParser()

    def process_files(self, files: list) -> None:
        """Load and process the AST of every file wrapper in *files*.

        Each processed AST is added to the module-level `Record`; the
        list-to-dict integration is deferred until all files are done.

        :param files: list of argparser IO wrappers
        :raises NoFilesSpecifiedError: when *files* is empty or falsy
        :return: returns nothing
        """
        if not files:
            # An empty or improperly populated list is a caller error.
            raise NoFilesSpecifiedError()

        for handle in files:
            self._astp.process_ast(self._intr.load_new_ast(handle.name))

        # Integrating once per run (instead of once per file) avoids the
        # repeated cost of merging the list into the dict.
        Record.integrate_list_to_dict()

    def generate_bundle(self) -> None:
        """Convert the master string->function record into a json dump.

        :return: returns nothing
        """
        self._intr.convert_dict_to_json(Record.str_func_dict)

    def export(self) -> None:
        """Write the converted json bundle to out/bundle.json.

        :return: returns nothing
        """
        self._intr.drop_bundle_to_disk(self._intr.json_data)
| 32.878788 | 73 | 0.651306 |
import logging
from abc import ABC
from interface.interface import Interface
from astparser.astparser import AstParser
from record.record import Record
from exception.exception import NoFilesSpecifiedError
LOGGER = logging.getLogger(__name__)
class Core(ABC):
def __init__(self) -> None:
self._intr = Interface()
self._astp = AstParser()
def process_files(self, files: list) -> None:
if not files:
raise NoFilesSpecifiedError()
for f_str in files:
ast = self._intr.load_new_ast(f_str.name)
self._astp.process_ast(ast)
Record.integrate_list_to_dict()
def generate_bundle(self) -> None:
self._intr.convert_dict_to_json(Record.str_func_dict)
def export(self) -> None:
self._intr.drop_bundle_to_disk(self._intr.json_data)
| true | true |
f724c21e1e6a61b8d8f476e230c0c8957dd47917 | 919 | py | Python | utillity/apicheck.py | dominik-air/lol-afk-buddy | b9e76336803922bd5f60dac33ec34f471eea3422 | [
"MIT"
] | 1 | 2021-10-11T23:02:19.000Z | 2021-10-11T23:02:19.000Z | utillity/apicheck.py | dominik-air/lol-afk-buddy | b9e76336803922bd5f60dac33ec34f471eea3422 | [
"MIT"
] | 2 | 2022-02-04T20:32:18.000Z | 2022-02-04T20:38:49.000Z | utillity/apicheck.py | dominik-air/lol-afk-buddy | b9e76336803922bd5f60dac33ec34f471eea3422 | [
"MIT"
] | 1 | 2022-02-05T15:12:15.000Z | 2022-02-05T15:12:15.000Z | from lcu_driver import Connector
import json
connector = Connector()


@connector.ready
async def connect(connection):
    """Dump the current rune pages to temp.json once the LCU API is up.

    Requires the player to be logged into the League client; otherwise a
    reminder is printed and nothing is written.
    """
    print("LCU API is ready to be used.")
    # Check if the user is already logged into his account; a non-200 status
    # from the summoner endpoint means no one is logged in.
    summoner = await connection.request("get", "/lol-summoner/v1/current-summoner")
    if summoner.status != 200:
        print(
            "Please login into your account to change your icon and restart the script..."
        )
    else:
        # Fetch all rune pages and persist them for offline inspection.
        # (Removed: an unused summoner-details fetch and dead alternate
        # endpoints /lol-perks/v1/perks and /lol-perks/v1/currentpage.)
        request = "/lol-perks/v1/pages"
        request_type = "get"
        summoner_spells = await connection.request(request_type, request)
        save = await summoner_spells.json()
        with open("temp.json", "w+") as f:
            json.dump(save, f, indent=4)


connector.start()
| 31.689655 | 90 | 0.638738 | from lcu_driver import Connector
import json
connector = Connector()
@connector.ready
async def connect(connection):
print("LCU API is ready to be used.")
summoner = await connection.request("get", "/lol-summoner/v1/current-summoner")
if summoner.status != 200:
print(
"Please login into your account to change your icon and restart the script..."
)
else:
data = await summoner.json()
summonerId = data['summonerId']
request = "/lol-perks/v1/pages"
request_type = "get"
summoner_spells = await connection.request(request_type, request)
save = await summoner_spells.json()
with open("temp.json", "w+") as f:
json.dump(save, f, indent=4)
connector.start()
| true | true |
f724c2d13ac7970d0010056bcfbce749495e3f07 | 4,414 | py | Python | pytorchvideo/models/memory_bank.py | kevinmtian/pytorchvideo | 168e16859a6029ef8ebeb476f9163bebb6c6b87d | [
"Apache-2.0"
] | 2,391 | 2021-04-13T18:10:18.000Z | 2022-03-31T15:07:09.000Z | pytorchvideo/models/memory_bank.py | kevinmtian/pytorchvideo | 168e16859a6029ef8ebeb476f9163bebb6c6b87d | [
"Apache-2.0"
] | 156 | 2021-04-13T18:51:49.000Z | 2022-03-31T08:05:50.000Z | pytorchvideo/models/memory_bank.py | kevinmtian/pytorchvideo | 168e16859a6029ef8ebeb476f9163bebb6c6b87d | [
"Apache-2.0"
] | 231 | 2021-04-14T05:04:55.000Z | 2022-03-22T09:35:46.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorchvideo.layers.utils import set_attributes
class MemoryBank(nn.Module):
    """
    Performs Non-Parametric Instance Discrimination for self supervised learning on
    video. A memory bank is built to keep and update the historical feature embedding
    and use them for contrastive learning.
    The original paper is:
    Unsupervised Feature Learning via Non-Parametric Instance Discrimination
    https://arxiv.org/pdf/1805.01978.pdf
    More details can be found from the memory bank part in the following paper:
    Momentum Contrast for Unsupervised Visual Representation Learning
    https://arxiv.org/pdf/1911.05722.pdf
    """
    def __init__(
        self,
        backbone: nn.Module,
        mlp: Optional[nn.Module] = None,
        neg_size: int = 4096,
        temperature: float = 0.07,
        bank_size: int = 1280000,
        dim: int = 2048,
        mmt: float = 0.999,
    ) -> None:
        """
        Args:
            backbone (nn.Module): backbone used to forward the input.
            mlp (nn.Module): multi-layer perception used in memory bank instance
                discrimination model.
            neg_size (int): size of negative samples per instance.
            temperature (float): temperature to use for contrastive learning.
            bank_size (int): size of the memory bank, expected to be the same size as
                the training set.
            dim (int): dimension of the channel.
            mmt (float): momentum to use.
        """
        super().__init__()
        # Copies every constructor argument onto `self` in one shot
        # (self.backbone, self.neg_size, self.temperature, ...).
        set_attributes(self, locals())
        self._init_mem_bank(bank_size, dim)
    def _init_mem_bank(self, bank_size: int, dim: int) -> None:
        """
        Given the memory bank size and the channel dimension, initialize the memory
        bank.
        Args:
            bank_size (int): size of the memory bank, expected to be the same size as
                the training set.
            dim (int): dimension of the channel.
        """
        # torch.rand gives [0, 1); mul_/add_ below rescale each entry to a
        # uniform draw from [-stdv, stdv].
        stdv = 1.0 / math.sqrt(dim / 3)
        self.register_buffer(
            "memory",
            torch.rand(
                bank_size,
                dim,
            )
            .mul_(2 * stdv)
            .add_(-stdv)
            .to(next(self.backbone.parameters()).device),
        )
    def forward(self, x: torch.Tensor, x_ind: torch.Tensor) -> torch.Tensor:
        """
        Perform contrastive learning with random sampled negative instance from the
        memory bank. During training, update the memory bank with latest feature
        embedding.
        Args:
            x (torch.tensor): a batch of image with augmentation. The input tensor
                shape should able to be feed into the backbone.
            x_ind (torch.tensor): the index of the image x from the dataset. Expected
                shape is B.
        Returns:
            Scalar cross-entropy loss over the (1 + neg_size) contrastive logits.
        """
        batch_size = x.shape[0]
        x = self.backbone(x)
        if self.mlp is not None:
            x = self.mlp(x)
        # Normalize the output embedding before multiplication.
        x = F.normalize(x, p=2, dim=1)
        # Random sample negative instances from the memory bank.
        idx = torch.randint(0, self.bank_size, size=(batch_size, self.neg_size + 1)).to(
            x.device
        )
        # Fill the first with positive instances.
        idx.select(1, 0).copy_(x_ind.data)
        weight = torch.index_select(self.memory, 0, idx.view(-1)).detach()
        weight = weight.view(batch_size, self.neg_size + 1, self.dim)
        # Multiplication for contrastive learning.
        # Per batch element: dot product of the embedding with each of its
        # (1 + neg_size) sampled bank entries -> logits of shape (B, 1 + neg_size).
        out = torch.einsum("bkc,bc->bk", weight, x)
        out = torch.div(out, self.temperature)
        # The positive sample always sits in column 0, so the target class is 0.
        gt = torch.zeros((batch_size,), device=x.device, dtype=torch.long)
        loss = torch.nn.functional.cross_entropy(out, gt)
        # Update memory during training.
        if self.training:
            with torch.no_grad():
                # Momentum update of this batch's bank entries, then
                # L2-renormalize so the updated rows stay unit-length.
                pos = torch.index_select(self.memory, 0, x_ind.view(-1))
                pos.mul_(self.mmt)
                pos.add_(torch.mul(x, 1 - self.mmt))
                norm = pos.pow(2).sum(1, keepdim=True).pow(0.5)
                updated = pos.div(norm)
                self.memory.index_copy_(0, x_ind, updated)
        return loss
| 38.719298 | 88 | 0.599909 |
import math
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorchvideo.layers.utils import set_attributes
class MemoryBank(nn.Module):
def __init__(
self,
backbone: nn.Module,
mlp: Optional[nn.Module] = None,
neg_size: int = 4096,
temperature: float = 0.07,
bank_size: int = 1280000,
dim: int = 2048,
mmt: float = 0.999,
) -> None:
super().__init__()
set_attributes(self, locals())
self._init_mem_bank(bank_size, dim)
def _init_mem_bank(self, bank_size: int, dim: int) -> None:
stdv = 1.0 / math.sqrt(dim / 3)
self.register_buffer(
"memory",
torch.rand(
bank_size,
dim,
)
.mul_(2 * stdv)
.add_(-stdv)
.to(next(self.backbone.parameters()).device),
)
def forward(self, x: torch.Tensor, x_ind: torch.Tensor) -> torch.Tensor:
batch_size = x.shape[0]
x = self.backbone(x)
if self.mlp is not None:
x = self.mlp(x)
x = F.normalize(x, p=2, dim=1)
idx = torch.randint(0, self.bank_size, size=(batch_size, self.neg_size + 1)).to(
x.device
)
idx.select(1, 0).copy_(x_ind.data)
weight = torch.index_select(self.memory, 0, idx.view(-1)).detach()
weight = weight.view(batch_size, self.neg_size + 1, self.dim)
out = torch.einsum("bkc,bc->bk", weight, x)
out = torch.div(out, self.temperature)
gt = torch.zeros((batch_size,), device=x.device, dtype=torch.long)
loss = torch.nn.functional.cross_entropy(out, gt)
if self.training:
with torch.no_grad():
pos = torch.index_select(self.memory, 0, x_ind.view(-1))
pos.mul_(self.mmt)
pos.add_(torch.mul(x, 1 - self.mmt))
norm = pos.pow(2).sum(1, keepdim=True).pow(0.5)
updated = pos.div(norm)
self.memory.index_copy_(0, x_ind, updated)
return loss
| true | true |
f724c36991439fd46c3b0dcb954ba15f7db2cfd6 | 2,255 | py | Python | workbook/convert.py | hantongliu/BETTER-hacktron | 37a919dd225970649cd9e7a58c74e2d8f0cca88c | [
"Apache-2.0"
] | null | null | null | workbook/convert.py | hantongliu/BETTER-hacktron | 37a919dd225970649cd9e7a58c74e2d8f0cca88c | [
"Apache-2.0"
] | null | null | null | workbook/convert.py | hantongliu/BETTER-hacktron | 37a919dd225970649cd9e7a58c74e2d8f0cca88c | [
"Apache-2.0"
] | 1 | 2020-10-15T13:57:13.000Z | 2020-10-15T13:57:13.000Z | import json
# Convert EOPEN flood-tweet JSON into SLIPO-style POI triples (N-Triples output).
prefix = "http://slipo.eu/id/poi"
rdftype = "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>"
poi = "<http://slipo.eu/def#POI>"
category = "<http://slipo.eu/def#category>"
termPrefix = "http://slipo.eu/id/term"
termValue = "<http://slipo.eu/def#termValue>"
hasGeometry = "<http://www.opengis.net/ont/geosparql#hasGeometry>"
geometryID = "<http://slipo.eu/id/poi/__id__/geometry>"
wkt = "<http://www.opengis.net/ont/geosparql#asWKT>"
epsg = "<http://www.opengis.net/def/crs/EPSG/0/4326>"
wktLiteral = "<http://www.opengis.net/ont/geosparql#wktLiteral>"

# Accumulate triples in a list and join once at the end: repeated `rdf += ...`
# is quadratic on large inputs.
triples = []
i = 0  # running counter minting unique term URIs across all tweets
with open('EnglishFloodTweets__.json') as json_file:
    data = json.load(json_file)
    for p in data['tweets']:
        print(p['id'])
        point_id = p['id']
        concepts = p['image_concepts']
        # Tweets without detected image concepts are skipped entirely.
        if concepts != "n/a":
            subject = "<" + prefix + "/" + point_id + ">"
            triples.append(subject + " " + rdftype + " " + poi + " . \n")
            # One category/termValue triple pair per whitespace-separated concept.
            for cat in concepts.split():
                term = ("<" + termPrefix + "/%s>" % i)
                triples.append(subject + " " + category + " " + term + " .\n")
                triples.append(subject + " " + termValue + " \"" + cat + "\" .\n")
                i += 1
            geometry = p['estimated_locations'][0]['geometry']
            coordinates = geometry['coordinates']
            # NOTE(review): GeoJSON coordinates are [longitude, latitude]
            # (RFC 7946), but the original code read index 0 as latitude and
            # emitted POINT(<0> <1>). Output order is preserved here; the axis
            # naming/order should be confirmed against the data producer.
            lat = coordinates[0]
            lon = coordinates[1]
            geometryObject = geometryID.replace("__id__", point_id)
            triples.append(subject + " " + hasGeometry + " " + geometryObject + " .\n")
            triples.append(
                (geometryObject + " " + wkt + " \"" + epsg + " POINT(%f %f)\"^^" + wktLiteral + " .\n")
                % (lat, lon)
            )

rdf = "".join(triples)
# Context manager guarantees the handle is closed even if the write fails.
with open('EOPEN_POIs_100.nt', 'w+') as output_file:
    output_file.write(rdf)
| 35.793651 | 124 | 0.531264 | import json
prefix = "http://slipo.eu/id/poi"
rdftype = "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>"
poi = "<http://slipo.eu/def#POI>"
category = "<http://slipo.eu/def#category>"
termPrefix = "http://slipo.eu/id/term"
termValue = "<http://slipo.eu/def#termValue>"
rdf = ""
i = 0
hasGeometry = "<http://www.opengis.net/ont/geosparql#hasGeometry>"
geometryID = "<http://slipo.eu/id/poi/__id__/geometry>"
wkt = "<http://www.opengis.net/ont/geosparql#asWKT>"
epsg = "<http://www.opengis.net/def/crs/EPSG/0/4326>"
wktLiteral = "<http://www.opengis.net/ont/geosparql#wktLiteral>"
with open('EnglishFloodTweets__.json') as json_file:
data = json.load(json_file)
temp = 0
for p in data['tweets']:
print(p['id'])
temp = temp+1
point_id = p['id']
concepts = p['image_concepts']
if concepts != "n/a":
subject = "<" + prefix + "/" + point_id + ">"
triple = subject + " " + rdftype + " " + poi + " . "
rdf += triple + "\n"
conceptsArray = concepts.split()
for cat in conceptsArray:
term = ("<" + termPrefix + "/%s>" % i)
triple2category = subject + " " + category + " " + term + " .\n"
categoryTerm = subject + " " + termValue + " \"" + cat + "\" .\n"
rdf += triple2category + categoryTerm
i = i+1
locations = p['estimated_locations']
geometry = locations[0]['geometry']
coordinates = geometry['coordinates']
lat = coordinates[0]
long = coordinates[1]
geometryObject = geometryID.replace("__id__", point_id)
geo = subject + " " + hasGeometry + " " + geometryObject + " ."
geoPoint = ((geometryObject + " " + wkt + " \"" + epsg + " POINT(%f %f)\"^^" + wktLiteral + " .") % (lat, long))
rdf += geo + "\n" + geoPoint + "\n"
output_file = open('EOPEN_POIs_100.nt', 'w+')
output_file.write(rdf)
output_file.close()
| true | true |
f724c39610fa99418467816bbb09a2a24a283c11 | 776 | py | Python | alpyro_msgs/tf2_msgs/tf2error.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | 1 | 2020-12-13T13:07:10.000Z | 2020-12-13T13:07:10.000Z | alpyro_msgs/tf2_msgs/tf2error.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | null | null | null | alpyro_msgs/tf2_msgs/tf2error.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | null | null | null | from typing import Final
from alpyro_msgs import RosMessage, string, uint8
class TF2Error(RosMessage):
    """Python binding for the ROS `tf2_msgs/TF2Error` message type."""

    __msg_typ__ = "tf2_msgs/TF2Error"
    # Base64-encoded plain-text ROS message definition (constants + fields).
    __msg_def__ = "dWludDggTk9fRVJST1I9MAp1aW50OCBMT09LVVBfRVJST1I9MQp1aW50OCBDT05ORUNUSVZJVFlfRVJST1I9Mgp1aW50OCBFWFRSQVBPTEFUSU9OX0VSUk9SPTMKdWludDggSU5WQUxJRF9BUkdVTUVOVF9FUlJPUj00CnVpbnQ4IFRJTUVPVVRfRVJST1I9NQp1aW50OCBUUkFOU0ZPUk1fRVJST1I9Ngp1aW50OCBlcnJvcgpzdHJpbmcgZXJyb3Jfc3RyaW5nCgo="
    # MD5 checksum of the message definition, used for type compatibility checks.
    __md5_sum__ = "bc6848fd6fd750c92e38575618a4917d"
    # Error-code constants mirrored from the ROS message definition.
    NO_ERROR: Final[uint8] = 0
    LOOKUP_ERROR: Final[uint8] = 1
    CONNECTIVITY_ERROR: Final[uint8] = 2
    EXTRAPOLATION_ERROR: Final[uint8] = 3
    INVALID_ARGUMENT_ERROR: Final[uint8] = 4
    TIMEOUT_ERROR: Final[uint8] = 5
    TRANSFORM_ERROR: Final[uint8] = 6
    # Instance fields: numeric error code plus a human-readable description.
    error: uint8
    error_string: string
| 40.842105 | 290 | 0.837629 | from typing import Final
from alpyro_msgs import RosMessage, string, uint8
class TF2Error(RosMessage):
__msg_typ__ = "tf2_msgs/TF2Error"
__msg_def__ = "dWludDggTk9fRVJST1I9MAp1aW50OCBMT09LVVBfRVJST1I9MQp1aW50OCBDT05ORUNUSVZJVFlfRVJST1I9Mgp1aW50OCBFWFRSQVBPTEFUSU9OX0VSUk9SPTMKdWludDggSU5WQUxJRF9BUkdVTUVOVF9FUlJPUj00CnVpbnQ4IFRJTUVPVVRfRVJST1I9NQp1aW50OCBUUkFOU0ZPUk1fRVJST1I9Ngp1aW50OCBlcnJvcgpzdHJpbmcgZXJyb3Jfc3RyaW5nCgo="
__md5_sum__ = "bc6848fd6fd750c92e38575618a4917d"
NO_ERROR: Final[uint8] = 0
LOOKUP_ERROR: Final[uint8] = 1
CONNECTIVITY_ERROR: Final[uint8] = 2
EXTRAPOLATION_ERROR: Final[uint8] = 3
INVALID_ARGUMENT_ERROR: Final[uint8] = 4
TIMEOUT_ERROR: Final[uint8] = 5
TRANSFORM_ERROR: Final[uint8] = 6
error: uint8
error_string: string
| true | true |
f724c467f82e4312c2a73d80e60f4b58409440e2 | 4,977 | py | Python | tests/contrib/operators/test_hive_to_dynamodb_operator.py | fxdmhtt/airflow | cf88f7bc7bbd3e9bf110e98f025759a96c130235 | [
"Apache-2.0"
] | 3 | 2019-03-28T05:59:39.000Z | 2019-10-03T22:05:25.000Z | tests/contrib/operators/test_hive_to_dynamodb_operator.py | fxdmhtt/airflow | cf88f7bc7bbd3e9bf110e98f025759a96c130235 | [
"Apache-2.0"
] | 7 | 2019-03-27T07:58:14.000Z | 2020-02-12T17:42:33.000Z | tests/contrib/operators/test_hive_to_dynamodb_operator.py | fxdmhtt/airflow | cf88f7bc7bbd3e9bf110e98f025759a96c130235 | [
"Apache-2.0"
] | 5 | 2017-06-19T19:55:47.000Z | 2020-10-10T00:49:20.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from unittest import mock
import datetime
import pandas as pd
from airflow import DAG
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
import airflow.contrib.operators.hive_to_dynamodb
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class HiveToDynamoDBTransferOperatorTest(unittest.TestCase):
    """Tests for HiveToDynamoDBTransferOperator against a moto-mocked DynamoDB."""

    def setUp(self):
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        dag = DAG('test_dag_id', default_args=args)
        self.dag = dag
        self.sql = 'SELECT 1'
        self.hook = AwsDynamoDBHook(
            aws_conn_id='aws_default', region_name='us-east-1')

    @staticmethod
    def process_data(data, *args, **kwargs):
        """Convert a pandas DataFrame into a list of plain dicts (one per row)."""
        return json.loads(data.to_json(orient='records'))

    def _create_test_table(self):
        """Create the 'test_airflow' DynamoDB table inside the moto mock.

        In production this table is expected to pre-exist; tests must create
        it on every run because each @mock_dynamodb2 context starts empty.
        Extracted here to remove the duplicated boilerplate the two transfer
        tests previously carried.
        """
        self.hook.get_conn().create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'name',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )

    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_conn_returns_a_boto3_connection(self):
        hook = AwsDynamoDBHook(aws_conn_id='aws_default')
        self.assertIsNotNone(hook.get_conn())

    @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
                return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_records_with_schema(self, get_results_mock):
        self._create_test_table()
        operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
            sql=self.sql,
            table_name="test_airflow",
            task_id='hive_to_dynamodb_check',
            table_keys=['id'],
            dag=self.dag)
        operator.execute(None)
        table = self.hook.get_conn().Table('test_airflow')
        table.meta.client.get_waiter(
            'table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 1)

    @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
                return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']))
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_pre_process_records_with_schema(self, get_results_mock):
        self._create_test_table()
        operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
            sql=self.sql,
            table_name='test_airflow',
            task_id='hive_to_dynamodb_check',
            table_keys=['id'],
            pre_process=self.process_data,
            dag=self.dag)
        operator.execute(None)
        table = self.hook.get_conn().Table('test_airflow')
        table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
        # Both source rows share id '1', so the second upsert overwrites the
        # first and exactly one item remains.
        self.assertEqual(table.item_count, 1)
if __name__ == '__main__':
unittest.main()
| 34.089041 | 103 | 0.625477 |
import json
import unittest
from unittest import mock
import datetime
import pandas as pd
from airflow import DAG
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
import airflow.contrib.operators.hive_to_dynamodb
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class HiveToDynamoDBTransferOperatorTest(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.sql = 'SELECT 1'
self.hook = AwsDynamoDBHook(
aws_conn_id='aws_default', region_name='us-east-1')
@staticmethod
def process_data(data, *args, **kwargs):
return json.loads(data.to_json(orient='records'))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default')
self.assertIsNotNone(hook.get_conn())
@mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_records_with_schema(self, get_results_mock):
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name="test_airflow",
task_id='hive_to_dynamodb_check',
table_keys=['id'],
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
@mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_pre_process_records_with_schema(self, get_results_mock):
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name='test_airflow',
task_id='hive_to_dynamodb_check',
table_keys=['id'],
pre_process=self.process_data,
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
if __name__ == '__main__':
unittest.main()
| true | true |
f724c73e2a3a025bfe20a3a8f316a5cc999fcf47 | 1,013 | py | Python | dashboard/models.py | farahaulita/pbp-tk | fabf8e07ed0e1270d3e98a3d1bdd46267a1a4d6c | [
"Unlicense"
] | null | null | null | dashboard/models.py | farahaulita/pbp-tk | fabf8e07ed0e1270d3e98a3d1bdd46267a1a4d6c | [
"Unlicense"
] | null | null | null | dashboard/models.py | farahaulita/pbp-tk | fabf8e07ed0e1270d3e98a3d1bdd46267a1a4d6c | [
"Unlicense"
] | null | null | null | from django.db import models
from login.models import User #add this
from django.dispatch import receiver #add this
from django.db.models.signals import post_save
from datetime import datetime
# SOURCE: https://www.ordinarycoders.com/django-custom-user-profile
class Profile(models.Model):
    # One profile per user; deleting the User cascades to its Profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Avatar picture; defaults to the bundled placeholder SVG.
    image = models.ImageField(default = 'undraw_profile.svg', upload_to='profile_pics')
    name = models.CharField(max_length=256, default="Enter Name")
    # Callable default: evaluated at save time, not at class definition time.
    birth_date = models.DateField(default=datetime.now)
    address = models.CharField(max_length=256, default="Enter Address")
    # NOTE(review): these post_save handlers live inside the class body;
    # @receiver registers the plain function (no `self`), which works, but
    # they are conventionally module-level — confirm before restructuring.
    @receiver(post_save, sender=User)  # create a Profile whenever a new User is created
    def create_user_profile(sender, instance, created, **kwargs):
        if created:
            Profile.objects.create(user=instance)
    @receiver(post_save, sender=User)  # keep the related Profile in sync on every User save
    def save_user_profile(sender, instance, **kwargs):
        instance.profile.save()
| 38.961538 | 87 | 0.740375 | from django.db import models
from login.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from datetime import datetime
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default = 'undraw_profile.svg', upload_to='profile_pics')
name = models.CharField(max_length=256, default="Enter Name")
birth_date = models.DateField(default=datetime.now)
address = models.CharField(max_length=256, default="Enter Address")
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
| true | true |
f724c7566d86613c348af51cc3a34cdf7fc5d540 | 97,191 | py | Python | snmp/tests/test_profiles.py | onurdialpad/integrations-core | e718b52d5878b20ff161a3ee6f24e5e845102d91 | [
"BSD-3-Clause"
] | null | null | null | snmp/tests/test_profiles.py | onurdialpad/integrations-core | e718b52d5878b20ff161a3ee6f24e5e845102d91 | [
"BSD-3-Clause"
] | null | null | null | snmp/tests/test_profiles.py | onurdialpad/integrations-core | e718b52d5878b20ff161a3ee6f24e5e845102d91 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import logging
import pytest
from datadog_checks.base import ConfigurationError
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.snmp import SnmpCheck
from datadog_checks.snmp.utils import (
_get_profile_name,
_is_abstract_profile,
_iter_default_profile_file_paths,
get_profile_definition,
recursively_expand_base_profiles,
)
from . import common
from .metrics import (
ADAPTER_IF_COUNTS,
CCCA_ROUTER_GAUGES,
CIE_METRICS,
COS_COUNTS,
COS_RATES,
CPU_METRICS,
DCU_COUNTS,
DISK_GAUGES,
DRS_GAUGES,
FIREWALL_COUNTS,
FRU_METRICS,
IF_BANDWIDTH_USAGE,
IF_COUNTS,
IF_GAUGES,
IF_RATES,
IP_COUNTS,
IP_IF_COUNTS,
IPX_COUNTS,
LTM_GAUGES,
LTM_NODES_COUNTS,
LTM_NODES_GAUGES,
LTM_NODES_RATES,
LTM_POOL_COUNTS,
LTM_POOL_GAUGES,
LTM_POOL_MEMBER_COUNTS,
LTM_POOL_MEMBER_GAUGES,
LTM_POOL_MEMBER_RATES,
LTM_POOL_RATES,
LTM_VIRTUAL_SERVER_COUNTS,
LTM_VIRTUAL_SERVER_GAUGES,
LTM_VIRTUAL_SERVER_RATES,
MEMORY_METRICS,
PEER_GAUGES,
PEER_RATES,
PROBE_GAUGES,
SCU_COUNTS,
SYSTEM_STATUS_GAUGES,
TCP_COUNTS,
TCP_GAUGES,
UDP_COUNTS,
USER_FIREWALL,
VIRTUAL_CHASSIS_COUNTS,
VIRTUAL_CHASSIS_RATES,
VOLTAGE_GAUGES,
)
pytestmark = common.python_autodiscovery_only
def test_load_profiles(caplog):
    """Every bundled profile must load into the check config without warnings."""
    config = common.generate_instance_config([])
    snmp_check = SnmpCheck('snmp', {}, [config])
    caplog.at_level(logging.WARNING)
    for profile_name, profile_definition in snmp_check.profiles.items():
        try:
            snmp_check._config.refresh_with_profile(profile_definition)
        except ConfigurationError as exc:
            pytest.fail("Profile `{}` is not configured correctly: {}".format(profile_name, exc))
        assert "table doesn't have a 'metric_tags' section" not in caplog.text
        caplog.clear()
def test_profile_hierarchy():
    """
    Enforce the profile inheritance rules:
    * Only concrete profiles MUST inherit from '_base.yaml'.
    * Only concrete profiles MUST define a `sysobjectid` field.
    """
    problems = []
    # Legacy base profiles allowed to extend '_base.yaml' despite being mixins.
    compat_base_profiles = ['_base_cisco', '_base_cisco_voice']

    for path in _iter_default_profile_file_paths():
        profile_name = _get_profile_name(path)
        definition = get_profile_definition({'definition_file': path})
        parents = definition.get('extends', [])
        extends_base = '_base.yaml' in parents
        declares_sysobjectid = definition.get('sysobjectid') is not None

        if _is_abstract_profile(profile_name):
            # Mixins must neither extend the base profile nor be resolvable.
            if extends_base and profile_name not in compat_base_profiles:
                problems.append("'{}': mixin wrongly extends '_base.yaml'".format(profile_name))
            if declares_sysobjectid:
                problems.append("'{}': mixin wrongly defines a `sysobjectid`".format(profile_name))
        else:
            # Concrete profiles must do both.
            if not extends_base:
                problems.append("'{}': concrete profile must directly extend '_base.yaml'".format(profile_name))
            if not declares_sysobjectid:
                problems.append("'{}': concrete profile must define a `sysobjectid`".format(profile_name))

    if problems:
        pytest.fail('\n'.join(sorted(problems)))
def run_profile_check(recording_name, profile_name=None):
    """
    Run a single check with the provided `recording_name` used as
    `community_string` by the docker SNMP endpoint.
    """
    instance = common.generate_instance_config([])
    instance['community_string'] = recording_name
    instance['enforce_mib_constraints'] = False
    check = SnmpCheck('snmp', {}, [instance])

    # The recording name doubles as the profile name unless one is given.
    lookup_name = recording_name if profile_name is None else profile_name
    profile = check.profiles.get(lookup_name)
    if profile:
        # Validate the profile definition against a throwaway check instance
        # before exercising it for real.
        try:
            test_check = SnmpCheck('snmp', {}, [common.generate_instance_config([])])
            test_check._config.refresh_with_profile(profile)
        except ConfigurationError as e:
            pytest.fail("Profile `{}` is not configured correctly: {}".format(recording_name, e))

    check.check(instance)
@pytest.mark.unit
@pytest.mark.parametrize(
    'definition_file, equivalent_definition',
    [
        pytest.param('_base_cisco.yaml', {'extends': ['_base.yaml', '_cisco-generic.yaml']}, id='generic'),
        pytest.param(
            '_base_cisco_voice.yaml',
            {'extends': ['_base.yaml', '_cisco-generic.yaml', '_cisco-voice.yaml']},
            id='voice',
        ),
    ],
)
def test_compat_cisco_base_profiles(definition_file, equivalent_definition):
    # type: (str, dict) -> None
    """
    The Cisco and Cisco Voice base profiles were replaced by mixins (Pull #6792),
    but their definition files must remain and expand to the same metrics so
    existing configurations keep working.
    """
    legacy_definition = get_profile_definition({'definition_file': definition_file})
    # Fully expand both sides, then compare the resolved definitions.
    for candidate in (legacy_definition, equivalent_definition):
        recursively_expand_base_profiles(candidate)
    assert legacy_definition == equivalent_definition
@pytest.mark.usefixtures("dd_environment")
def test_cisco_voice(aggregator):
    """Assert every metric emitted by the `cisco_icm` (Cisco voice) recording."""
    run_profile_check('cisco_icm')
    tags = [
        'snmp_profile:cisco_icm',
        'snmp_host:test',
        'device_vendor:cisco',
    ] + common.CHECK_TAGS
    # Host-resources (hrSWRun*) process metrics.
    resources = ["hrSWRunPerfMem", "hrSWRunPerfCPU"]
    common.assert_common_metrics(aggregator, tags)
    for resource in resources:
        aggregator.assert_metric('snmp.{}'.format(resource), metric_type=aggregator.GAUGE, tags=tags)
    # Process status, tagged per run index from the recording.
    run_indices = [4, 7, 8, 9, 10, 18, 24, 29, 30]
    for index in run_indices:
        status_tags = tags + ['run_index:{}'.format(index)]
        aggregator.assert_metric('snmp.hrSWRunStatus', metric_type=aggregator.GAUGE, tags=status_tags)
    # ccvp* (CVP) SIP and licensing gauges.
    cvp_gauges = [
        "ccvpSipIntAvgLatency1",
        "ccvpSipIntAvgLatency2",
        "ccvpSipIntConnectsRcv",
        "ccvpSipIntNewCalls",
        "ccvpSipRtActiveCalls",
        "ccvpSipRtTotalCallLegs",
        "ccvpLicRtPortsInUse",
        "ccvpLicAggMaxPortsInUse",
    ]
    for cvp in cvp_gauges:
        aggregator.assert_metric('snmp.{}'.format(cvp), metric_type=aggregator.GAUGE, tags=tags)
    # ccm* (CallManager) registration metrics: counts submit as RATE, the rest as GAUGE.
    ccms_counts = ["ccmRejectedPhones", "ccmUnregisteredPhones"]
    ccms_gauges = ["ccmRegisteredGateways", "ccmRegisteredPhones"]
    for ccm in ccms_counts:
        aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.RATE, tags=tags)
    for ccm in ccms_gauges:
        aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.GAUGE, tags=tags)
    # Per-peer call volume gauges, one series per peer index in the recording.
    calls = [
        "cvCallVolPeerIncomingCalls",
        "cvCallVolPeerOutgoingCalls",
    ]
    peers = [4, 13, 14, 17, 18, 22, 25, 30, 31]
    for call in calls:
        for peer in peers:
            peer_tags = tags + ["peer_index:{}".format(peer)]
            aggregator.assert_metric('snmp.{}'.format(call), metric_type=aggregator.GAUGE, tags=peer_tags)
    # Media call volume gauges are not tagged per peer.
    calls = [
        "cvCallVolMediaIncomingCalls",
        "cvCallVolMediaOutgoingCalls",
    ]
    for call in calls:
        aggregator.assert_metric('snmp.{}'.format(call), metric_type=aggregator.GAUGE, tags=tags)
    # Dial-control peer statistics: monotonic counts, only peer index 7 in the recording.
    dial_controls = [
        "dialCtlPeerStatsAcceptCalls",
        "dialCtlPeerStatsFailCalls",
        "dialCtlPeerStatsRefuseCalls",
        "dialCtlPeerStatsSuccessCalls",
    ]
    for ctl in dial_controls:
        aggregator.assert_metric(
            'snmp.{}'.format(ctl), metric_type=aggregator.MONOTONIC_COUNT, tags=["peer_index:7"] + tags
        )
    # PIM status carries its own host/name/number tags.
    pim_tags = tags + ['pim_host:test', 'pim_name:name', 'pim_num:2']
    aggregator.assert_metric('snmp.{}'.format("cccaPimStatus"), metric_type=aggregator.GAUGE, tags=pim_tags)
    aggregator.assert_metric('snmp.{}'.format("sysUpTimeInstance"), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # ccca router gauges, one series per ICM instance number.
    instance_numbers = ['4446', '5179', '12093', '19363', '25033', '37738', '42562', '51845', '62906', '63361']
    for metric in CCCA_ROUTER_GAUGES:
        for instance_number in instance_numbers:
            instance_tags = tags + ['instance_number:{}'.format(instance_number)]
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=instance_tags)
    # Fail if the recording produced any metric not asserted above.
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_f5(aggregator):
    """Check the 'f5-big-ip' profile against the simulated f5 device.

    Verifies system scalar gauges/counts, per-CPU rates, per-interface IF-MIB
    metrics (with bandwidth usage only where a speed is known), per-IP-version
    counters, and LTM virtual server / node / pool / pool-member metrics, then
    asserts that every submitted metric was covered.
    """
    profile = 'f5-big-ip'
    run_profile_check(profile)
    # Scalar system gauges (memory, TCP/UDP connection states, client SSL).
    gauges = [
        'sysStatMemoryTotal',
        'sysStatMemoryUsed',
        'sysGlobalTmmStatMemoryTotal',
        'sysGlobalTmmStatMemoryUsed',
        'sysGlobalHostOtherMemoryTotal',
        'sysGlobalHostOtherMemoryUsed',
        'sysGlobalHostSwapTotal',
        'sysGlobalHostSwapUsed',
        'sysTcpStatOpen',
        'sysTcpStatCloseWait',
        'sysTcpStatFinWait',
        'sysTcpStatTimeWait',
        'sysUdpStatOpen',
        'sysClientsslStatCurConns',
    ]
    # Monotonically increasing system counters (TCP/UDP accept/connect, SSL bytes).
    counts = [
        'sysTcpStatAccepts',
        'sysTcpStatAcceptfails',
        'sysTcpStatConnects',
        'sysTcpStatConnfails',
        'sysUdpStatAccepts',
        'sysUdpStatAcceptfails',
        'sysUdpStatConnects',
        'sysUdpStatConnfails',
        'sysClientsslStatEncryptedBytesIn',
        'sysClientsslStatEncryptedBytesOut',
        'sysClientsslStatDecryptedBytesIn',
        'sysClientsslStatDecryptedBytesOut',
        'sysClientsslStatHandshakeFailures',
    ]
    # CPU usage breakdown, submitted as rates and tagged per CPU index.
    cpu_rates = [
        'sysMultiHostCpuUser',
        'sysMultiHostCpuNice',
        'sysMultiHostCpuSystem',
        'sysMultiHostCpuIdle',
        'sysMultiHostCpuIrq',
        'sysMultiHostCpuSoftirq',
        'sysMultiHostCpuIowait',
    ]
    # (interface name, ifAlias description) pairs reported by the fixture.
    interfaces = [
        ('1.0', 'desc2'),
        ('mgmt', 'desc1'),
        ('/Common/internal', 'desc5'),
        ('/Common/http-tunnel', 'desc3'),
        ('/Common/socks-tunnel', 'desc4'),
    ]
    # Only these interfaces get bandwidth-usage rates (the others lack a speed value).
    interfaces_with_bandwidth_usage = {
        '1.0',
        'mgmt',
        '/Common/internal',
    }
    tags = [
        'snmp_profile:' + profile,
        'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal',
        'device_vendor:f5',
    ]
    tags += common.CHECK_TAGS
    common.assert_common_metrics(aggregator, tags)
    for metric in gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    for metric in counts:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
    # The fixture exposes exactly two CPUs (indexes 0 and 1).
    for metric in cpu_rates:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=['cpu:0'] + tags, count=1)
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=['cpu:1'] + tags, count=1)
    # Standard IF-MIB metrics per interface, tagged with name and alias.
    for interface, desc in interfaces:
        interface_tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(desc)] + tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=interface_tags, count=1
            )
        for metric in IF_RATES:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=interface_tags, count=1
            )
        if interface in interfaces_with_bandwidth_usage:
            for metric in IF_BANDWIDTH_USAGE:
                aggregator.assert_metric(
                    'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=interface_tags, count=1
                )
        for metric in IF_GAUGES:
            aggregator.assert_metric(
                'snmp.{}'.format(metric),
                metric_type=aggregator.GAUGE,
                tags=interface_tags,
                count=1,
            )
    # IP counters are emitted once per IP version.
    for version in ['ipv4', 'ipv6']:
        ip_tags = ['ipversion:{}'.format(version)] + tags
        for metric in IP_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=ip_tags, count=1
            )
    # LTM (Local Traffic Management) global gauges.
    for metric in LTM_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # LTM virtual server metrics, tagged per server.
    servers = ['server1', 'server2', 'server3']
    for server in servers:
        server_tags = tags + ['server:{}'.format(server)]
        for metric in LTM_VIRTUAL_SERVER_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=server_tags, count=1)
        for metric in LTM_VIRTUAL_SERVER_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=server_tags, count=1
            )
        for metric in LTM_VIRTUAL_SERVER_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=server_tags, count=1)
    # LTM node metrics, tagged per node.
    nodes = ['node1', 'node2', 'node3']
    for node in nodes:
        node_tags = tags + ['node:{}'.format(node)]
        for metric in LTM_NODES_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=node_tags, count=1)
        for metric in LTM_NODES_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=node_tags, count=1
            )
        for metric in LTM_NODES_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=node_tags, count=1)
    # LTM pool metrics, tagged per pool.
    pools = ['pool1', 'pool2']
    for pool in pools:
        pool_tags = tags + ['pool:{}'.format(pool)]
        for metric in LTM_POOL_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=pool_tags, count=1)
        for metric in LTM_POOL_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=pool_tags, count=1
            )
        for metric in LTM_POOL_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=pool_tags, count=1)
    # LTM pool-member metrics, tagged with both the pool and its member node.
    pool_members = [('pool1', 'node1'), ('pool1', 'node2'), ('pool2', 'node3')]
    for pool, node in pool_members:
        pool_member_tags = tags + ['pool:{}'.format(pool), 'node:{}'.format(node)]
        for metric in LTM_POOL_MEMBER_GAUGES:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=pool_member_tags, count=1
            )
        for metric in LTM_POOL_MEMBER_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=pool_member_tags, count=1
            )
        for metric in LTM_POOL_MEMBER_RATES:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=pool_member_tags, count=1
            )
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_router(aggregator):
    """Validate the 'generic-router' profile against the simulated device.

    Covers per-interface IF-MIB metrics, TCP/UDP scalars, and IP counters
    (per IP version, some also per interface index), then checks that every
    submitted metric was asserted.
    """
    profile = "generic-router"
    run_profile_check(profile)

    common_tags = common.CHECK_TAGS + ['snmp_profile:' + profile]
    common.assert_common_metrics(aggregator, common_tags)

    # IF-MIB metrics, tagged with the interface name and its alias.
    for if_name, if_alias in (('eth0', 'kept'), ('eth1', 'their forward oxen')):
        if_tags = ['interface:{}'.format(if_name), 'interface_alias:{}'.format(if_alias)] + common_tags
        for count_name in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.' + count_name, metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
            )
        for rate_name in IF_RATES:
            aggregator.assert_metric('snmp.' + rate_name, metric_type=aggregator.RATE, tags=if_tags, count=1)
        for gauge_name in IF_GAUGES:
            aggregator.assert_metric('snmp.' + gauge_name, metric_type=aggregator.GAUGE, tags=if_tags, count=1)
        for usage_name in IF_BANDWIDTH_USAGE:
            aggregator.assert_metric('snmp.' + usage_name, metric_type=aggregator.RATE, tags=if_tags, count=1)

    # TCP/UDP scalar metrics carry only the common tags.
    for tcp_count in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.' + tcp_count, metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for tcp_gauge in TCP_GAUGES:
        aggregator.assert_metric('snmp.' + tcp_gauge, metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for udp_count in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.' + udp_count, metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )

    # IP counters per version; a subset is also broken down per interface index.
    for ip_version in ('ipv4', 'ipv6'):
        version_tags = ['ipversion:{}'.format(ip_version)] + common_tags
        for ip_count in IP_COUNTS + IPX_COUNTS:
            aggregator.assert_metric(
                'snmp.' + ip_count, metric_type=aggregator.MONOTONIC_COUNT, tags=version_tags, count=1
            )
        for ip_if_count in IP_IF_COUNTS:
            for if_index in ('17', '21'):
                per_if_tags = ['ipversion:{}'.format(ip_version), 'interface:{}'.format(if_index)] + common_tags
                aggregator.assert_metric(
                    'snmp.' + ip_if_count, metric_type=aggregator.MONOTONIC_COUNT, tags=per_if_tags, count=1
                )

    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_f5_router(aggregator):
    """Run the generic 'router' profile against the f5 simulated device.

    Uses an explicit profile mapping (instead of sysObjectID detection) and
    checks IF-MIB and IP metrics plus uptime.
    """
    # Use the generic profile against the f5 device
    instance = common.generate_instance_config([])
    instance['community_string'] = 'f5-big-ip'
    instance['enforce_mib_constraints'] = False
    init_config = {'profiles': {'router': {'definition_file': 'generic-router.yaml'}}}
    check = SnmpCheck('snmp', init_config, [instance])
    check.check(instance)

    # (interface name, ifAlias description) pairs reported by the f5 fixture.
    interface_pairs = [
        ('1.0', 'desc2'),
        ('mgmt', 'desc1'),
        ('/Common/internal', 'desc5'),
        ('/Common/http-tunnel', 'desc3'),
        ('/Common/socks-tunnel', 'desc4'),
    ]
    # Only interfaces with a usable speed value produce bandwidth-usage rates.
    with_bandwidth_usage = {
        '1.0',
        'mgmt',
        '/Common/internal',
    }

    common_tags = [
        'snmp_profile:router',
        'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal',
    ]
    common_tags.extend(common.CHECK_TAGS)
    common.assert_common_metrics(aggregator, common_tags)

    for if_name, if_alias in interface_pairs:
        if_tags = ['interface:{}'.format(if_name), 'interface_alias:{}'.format(if_alias)] + common_tags
        for count_name in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.' + count_name, metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
            )
        for rate_name in IF_RATES:
            aggregator.assert_metric('snmp.' + rate_name, metric_type=aggregator.RATE, tags=if_tags, count=1)
        for gauge_name in IF_GAUGES:
            aggregator.assert_metric('snmp.' + gauge_name, metric_type=aggregator.GAUGE, tags=if_tags, count=1)
        if if_name in with_bandwidth_usage:
            for usage_name in IF_BANDWIDTH_USAGE:
                aggregator.assert_metric('snmp.' + usage_name, metric_type=aggregator.RATE, tags=if_tags, count=1)

    # IP counters are emitted once per IP version.
    for ip_version in ('ipv4', 'ipv6'):
        version_tags = ['ipversion:{}'.format(ip_version)] + common_tags
        for ip_count in IP_COUNTS:
            aggregator.assert_metric(
                'snmp.' + ip_count, metric_type=aggregator.MONOTONIC_COUNT, tags=version_tags, count=1
            )

    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_3850(aggregator):
    """Check the 'cisco-3850' profile against the simulated device.

    Covers IF-MIB interface metrics, IPv6 counters, TCP/UDP scalars, entity
    sensors, FRUs, CPUs, CIE per-interface metrics, environment monitors
    (power/fan/temperature), stack and switch state, memory pools, and OSPF
    neighbor/interface metrics, then checks full metric coverage.
    """
    profile = "cisco-3850"
    run_profile_check(profile)
    # We're not covering all interfaces
    interfaces = ["Gi1/0/{}".format(i) for i in range(1, 48)]
    common_tags = common.CHECK_TAGS + [
        'snmp_host:Cat-3850-4th-Floor.companyname.local',
        'snmp_profile:' + profile,
        'device_vendor:cisco',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # Interfaces absent from this mapping report an empty alias.
    aliases = {
        'Gi1/0/24': 'LWAP-example',
        'Gi1/0/33': 'switchboard console',
        'Gi1/0/38': 'Mitel Console',
        'Gi1/1/3': 'Link to Switch',
        'Gi2/0/13': 'AP01',
        'Gi2/0/14': 'AP02',
        'Gi2/0/15': 'AP03',
        'Gi2/0/16': 'AP04',
        'Gi2/0/17': 'AP05',
        'Gi2/0/18': 'AP06',
        'Gi2/1/4': 'Link to Switch',
    }
    for interface in interfaces:
        alias = aliases.get(interface, '')
        tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(alias)] + common_tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in IF_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        for metric in IF_BANDWIDTH_USAGE:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
        for metric in IF_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    # This fixture only emits IP counters for ipv6; hoist the invariant tag list.
    tags = common_tags + ['ipversion:ipv6']
    for metric in IP_COUNTS + IPX_COUNTS:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    # Entity sensor values, one per sensor index.
    sensors = [1006, 1007, 1008, 2006, 2007, 2008]
    for sensor in sensors:
        tags = ['sensor_id:{}'.format(sensor), 'sensor_type:8'] + common_tags
        aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Field-replaceable unit power metrics.
    frus = [1001, 1010, 2001, 2010]
    for fru in frus:
        tags = ['fru:{}'.format(fru)] + common_tags
        for metric in FRU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    cpus = [1000, 2000]
    for cpu in cpus:
        tags = ['cpu:{}'.format(cpu)] + common_tags
        for metric in CPU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # CIE (Cisco IF extension) metrics per interface, without the alias tag.
    for interface in interfaces:
        tags = ['interface:{}'.format(interface)] + common_tags
        for metric in CIE_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
    power_supplies = [
        (1, 'Switch 1 - Power Supply B, NotExist'),
        (1, 'Switch 2 - Power Supply B, NotExist'),
        (2, 'Switch 1 - Power Supply A, Normal'),
        (2, 'Switch 2 - Power Supply A, Normal'),
    ]
    for source, descr in power_supplies:
        env_tags = ['power_source:{}'.format(source), 'power_status_descr:{}'.format(descr)]
        aggregator.assert_metric(
            'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=env_tags + common_tags
        )
    aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
    for switch, mac_addr in [(1, '0x046c9d42b080'), (2, '0xdccec1430680')]:
        tags = ['entity_name:Switch {}'.format(switch), 'mac_addr:{}'.format(mac_addr)] + common_tags
        aggregator.assert_metric('snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=tags)
    frus = [1011, 1012, 1013, 2011, 2012, 2013]
    for fru in frus:
        # Reuse the tag list instead of rebuilding it inline (was duplicated).
        tags = ['fru:{}'.format(fru)] + common_tags
        aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
    # Memory pool gauges, one series per pool name.
    for metric in MEMORY_METRICS:
        for pool in ['Processor', 'IOS Process stack']:
            tags = ['mem_pool_name:{}'.format(pool)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
    # OSPF neighbor metrics, tagged with the neighbor's IP and router ID.
    neighbor_metrics = [
        ('ospfNbrEvents', aggregator.RATE),
        ('ospfNbrState', aggregator.GAUGE),
        ('ospfNbrLsRetransQLen', aggregator.GAUGE),
    ]
    for metric, metric_type in neighbor_metrics:
        tags = ['neighbor_ip:192.29.116.26', 'neighbor_id:192.29.66.79'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
    # OSPF interface (link-local state) metrics.
    lls_metrics = ['ospfIfRetransInterval', 'ospfIfState']
    for metric in lls_metrics:
        tags = ['ospf_ip_addr:192.29.116.25'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    for temp_index in [1006, 1007, 1008, 2006, 2007, 2008]:
        env_tag = ['temp_index:{}'.format(temp_index), 'temp_state:1']
        aggregator.assert_metric(
            'snmp.ciscoEnvMonTemperatureStatusValue', metric_type=aggregator.GAUGE, tags=env_tag + common_tags
        )
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_meraki_cloud_controller(aggregator):
    """Check the 'meraki-cloud-controller' profile: device-level gauges,
    Meraki per-interface gauges, and standard IF-MIB metrics for eth0.
    """
    run_profile_check('meraki-cloud-controller')

    common_tags = common.CHECK_TAGS + [
        'snmp_profile:meraki-cloud-controller',
        'snmp_host:dashboard.meraki.com',
        'device_vendor:meraki',
    ]
    common.assert_common_metrics(aggregator, common_tags)

    # Device-level gauges, tagged by device name, product and network.
    device_tags = ['device:Gymnasium', 'product:MR16-HW', 'network:L_NETWORK'] + common_tags
    for dev_metric in ('devStatus', 'devClientCount'):
        aggregator.assert_metric('snmp.' + dev_metric, metric_type=aggregator.GAUGE, tags=device_tags, count=1)

    # Meraki per-interface traffic gauges for the wireless interface.
    wifi_tags = ['interface:wifi0', 'index:4'] + common_tags
    wifi_metrics = (
        'devInterfaceSentPkts',
        'devInterfaceRecvPkts',
        'devInterfaceSentBytes',
        'devInterfaceRecvBytes',
    )
    for wifi_metric in wifi_metrics:
        aggregator.assert_metric('snmp.' + wifi_metric, metric_type=aggregator.GAUGE, tags=wifi_tags, count=1)

    # Standard IF-MIB metrics for the wired interface.
    eth_tags = ['interface:eth0'] + common_tags
    for count_name in IF_COUNTS:
        aggregator.assert_metric(
            'snmp.' + count_name, metric_type=aggregator.MONOTONIC_COUNT, tags=eth_tags, count=1
        )
    for gauge_name in IF_GAUGES:
        aggregator.assert_metric('snmp.' + gauge_name, metric_type=aggregator.GAUGE, tags=eth_tags, count=1)
    for rate_name in IF_RATES:
        aggregator.assert_metric('snmp.' + rate_name, metric_type=aggregator.RATE, tags=eth_tags, count=1)
    for usage_name in IF_BANDWIDTH_USAGE:
        aggregator.assert_metric('snmp.' + usage_name, metric_type=aggregator.RATE, tags=eth_tags, count=1)

    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_idrac(aggregator):
    """Check the 'idrac' profile against the simulated Dell iDRAC device.

    Verifies network adapter counters, chassis status gauges, power supply,
    disk, battery, controller, PCI device, slot, network device, BIOS, probe,
    voltage, memory device, and DRS gauges, then checks full coverage.
    """
    run_profile_check('idrac')
    interfaces = ['eth0', 'en1']
    common_tags = common.CHECK_TAGS + ['snmp_profile:idrac', 'device_vendor:dell']
    common.assert_common_metrics(aggregator, common_tags)
    # Network adapter counters, tagged by adapter name.
    for interface in interfaces:
        tags = ['adapter:{}'.format(interface)] + common_tags
        for count in ADAPTER_IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(count), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    # System status gauges, one set per chassis index.
    indexes = ['26', '29']
    for index in indexes:
        tags = ['chassis_index:{}'.format(index)] + common_tags
        for gauge in SYSTEM_STATUS_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    powers = ['supply1', 'supply2']
    for power in powers:
        tags = ['supply_name:{}'.format(power)] + common_tags
        aggregator.assert_metric('snmp.enclosurePowerSupplyState', metric_type=aggregator.GAUGE, tags=tags, count=1)
    disks = ['disk1', 'disk2']
    for disk in disks:
        tags = ['disk_name:{}'.format(disk)] + common_tags
        for gauge in DISK_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    batteries = ['battery1', 'battery2']
    for battery_name in batteries:
        tags = ['battery_name:{}'.format(battery_name)] + common_tags
        aggregator.assert_metric('snmp.{}'.format("batteryState"), metric_type=aggregator.GAUGE, tags=tags, count=1)
    controllers = ['controller1', 'controller2']
    for controller in controllers:
        tags = ['controller_name:{}'.format(controller)] + common_tags
        aggregator.assert_metric(
            'snmp.{}'.format("controllerRollUpStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1
        )
    # PCI devices are paired positionally with their chassis indexes.
    devices = ['device1', 'device2']
    indexes = ['10', '20']
    for device, index in zip(devices, indexes):
        tags = ['device_descr_name:{}'.format(device), 'chassis_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.{}'.format("pCIDeviceStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # System slots, likewise paired with chassis indexes.
    slots = ['slot1', 'slot2']
    indexes = ['19', '21']
    for slot, index in zip(slots, indexes):
        tags = ['slot_name:{}'.format(slot), 'chassis_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.{}'.format("systemSlotStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Network device status, tagged by chassis index, FQDD and MAC address.
    tag_mappings = [('29', 'device2', '0x9e00e0291401'), ('3', 'device1', '0x9e00e0291401')]
    for index, device, mac in tag_mappings:
        tags = [
            'chassis_index:{}'.format(index),
            'device_fqdd:{}'.format(device),
            'mac_addr:{}'.format(mac),
        ] + common_tags
        aggregator.assert_metric(
            'snmp.{}'.format("networkDeviceStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1
        )
    indexes = ['3', '31']
    for index in indexes:
        tags = ['chassis_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.{}'.format("systemBIOSStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Amperage probes: chassis indexes paired with probe type codes.
    indexes = ['9', '18']
    probe_types = ['26', '26']
    for index, probe_type in zip(indexes, probe_types):
        tags = ['chassis_index:{}'.format(index), 'probe_type:{}'.format(probe_type)] + common_tags
        for gauge in PROBE_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Voltage probes, same pairing scheme.
    indexes = ['12', '22']
    probe_types = ['6', '3']
    for index, probe_type in zip(indexes, probe_types):
        tags = ['chassis_index:{}'.format(index), 'probe_type:{}'.format(probe_type)] + common_tags
        for gauge in VOLTAGE_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Memory devices: chassis index, device type and device index zipped together.
    indexes = ['29', '22']
    device_types = ['26', '4']
    device_indexes = ['4', '21']
    for index, device_type, device_index in zip(indexes, device_types, device_indexes):
        tags = [
            'chassis_index:{}'.format(index),
            'device_type:{}'.format(device_type),
            'device_index:{}'.format(device_index),
        ] + common_tags
        aggregator.assert_metric(
            'snmp.{}'.format("memoryDeviceStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1
        )
    for gauge in DRS_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_nexus(aggregator):
    """Check the 'cisco-nexus' profile against the simulated device.

    Covers CIE reset counts, IF-MIB interface metrics, TCP/UDP scalars,
    entity sensors, FRUs, CPUs, environment monitors (temperature, power,
    fan), stack/switch state and fan tray status, then checks coverage.
    """
    profile = "cisco-nexus"
    run_profile_check(profile)
    interfaces = ["GigabitEthernet1/0/{}".format(i) for i in range(1, 9)]
    common_tags = common.CHECK_TAGS + [
        'snmp_host:Nexus-eu1.companyname.managed',
        'snmp_profile:' + profile,
        'device_vendor:cisco',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # CIE reset counts are tagged with the interface name only (no alias).
    for interface in interfaces:
        tags = ['interface:{}'.format(interface)] + common_tags
        aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
    # IF-MIB metrics include an (empty) alias tag for this fixture.
    for interface in interfaces:
        tags = ['interface:{}'.format(interface), 'interface_alias:'] + common_tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in IF_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
        for metric in IF_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        for metric in IF_BANDWIDTH_USAGE:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    # NOTE(review): sensor id 12 appears twice; the repeated assertion is
    # harmless but looks like a typo in the expected list — confirm against
    # the fixture data.
    sensors = [1, 9, 11, 12, 12, 14, 17, 26, 29, 31]
    for sensor in sensors:
        tags = ['sensor_id:{}'.format(sensor), 'sensor_type:8'] + common_tags
        aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
    frus = [6, 7, 15, 16, 19, 27, 30, 31]
    for fru in frus:
        tags = ['fru:{}'.format(fru)] + common_tags
        for metric in FRU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    cpus = [3173, 6692, 11571, 19529, 30674, 38253, 52063, 54474, 55946, 63960]
    for cpu in cpus:
        tags = ['cpu:{}'.format(cpu)] + common_tags
        for metric in CPU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Temperature status values, one per (index, state) pair from the fixture.
    for (index, state) in [(3, 3), (6, 6), (8, 6), (11, 6), (13, 3), (14, 6), (20, 6), (21, 4), (31, 5)]:
        aggregator.assert_metric(
            'snmp.ciscoEnvMonTemperatureStatusValue',
            metric_type=aggregator.GAUGE,
            tags=['temp_state:{}'.format(state), 'temp_index:{}'.format(index)] + common_tags,
        )
    power_supply_tags = ['power_source:1', 'power_status_descr:Jaded driving their their their'] + common_tags
    aggregator.assert_metric('snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=power_supply_tags)
    fan_indices = [4, 6, 7, 16, 21, 22, 25, 27]
    for index in fan_indices:
        tags = ['fan_status_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_metric(
        'snmp.cswStackPortOperStatus',
        metric_type=aggregator.GAUGE,
        tags=common_tags + ['interface:GigabitEthernet1/0/1'],
    )
    aggregator.assert_metric(
        'snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=['mac_addr:0xffffffffffff'] + common_tags
    )
    frus = [2, 7, 8, 21, 26, 27, 30, 31]
    for fru in frus:
        # Reuse the tag list instead of rebuilding it inline (was duplicated).
        tags = ['fru:{}'.format(fru)] + common_tags
        aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_dell_poweredge(aggregator):
    """Check the 'dell-poweredge' profile against the simulated device.

    The profile aggregates three MIB families: PowerEdge system metrics,
    Intel adapter counters, and iDRAC status gauges.
    """
    run_profile_check('dell-poweredge')
    # Poweredge
    # Operating system memory gauges, tagged per chassis index.
    sys_mem_gauges = [
        'operatingSystemMemoryAvailablePhysicalSize',
        'operatingSystemMemoryTotalPageFileSize',
        'operatingSystemMemoryAvailablePageFileSize',
        'operatingSystemMemoryTotalVirtualSize',
        'operatingSystemMemoryAvailableVirtualSize',
    ]
    power_supply_gauges = [
        'powerSupplyStatus',
        'powerSupplyOutputWatts',
        'powerSupplyMaximumInputVoltage',
        'powerSupplyCurrentInputVoltage',
    ]
    temperature_probe_gauges = ['temperatureProbeStatus', 'temperatureProbeReading']
    processor_device_gauges = ['processorDeviceStatus', 'processorDeviceThreadCount']
    cache_device_gauges = ['cacheDeviceStatus', 'cacheDeviceMaximumSize', 'cacheDeviceCurrentSize']
    memory_device_gauges = ['memoryDeviceStatus', 'memoryDeviceFailureModes']
    # iDRAC gauges are asserted without tags/counts at the end of the test.
    idrac_gauges = (
        ['batteryState', 'controllerRollUpStatus', 'pCIDeviceStatus', 'systemSlotStatus', 'systemBIOSStatus']
        + VOLTAGE_GAUGES
        + PROBE_GAUGES
    )
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:dell-poweredge',
        'device_vendor:dell',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # at_least=1 is used below because some series appear more than once in
    # the fixture data.
    chassis_indexes = [29, 31]
    for chassis_index in chassis_indexes:
        tags = ['chassis_index:{}'.format(chassis_index)] + common_tags
        for metric in sys_mem_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    indexes = [5, 17]
    for index in indexes:
        tags = ['chassis_index:4', 'index:{}'.format(index)] + common_tags
        for metric in power_supply_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    indexes = [13]
    for index in indexes:
        tags = ['chassis_index:18', 'index:{}'.format(index)] + common_tags
        for metric in temperature_probe_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    indexes = [17, 28]
    for index in indexes:
        tags = ['chassis_index:5', 'index:{}'.format(index)] + common_tags
        for metric in processor_device_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    indexes = [15, 27]
    for index in indexes:
        tags = ['chassis_index:11', 'index:{}'.format(index)] + common_tags
        for metric in cache_device_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    serial_numbers = ['forward zombies acted Jaded', 'kept oxen their their oxen oxen']
    for serial_number in serial_numbers:
        tags = ['serial_number_name:{}'.format(serial_number), 'chassis_index:1'] + common_tags
        for metric in memory_device_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    ip_addresses = ['66.97.1.103', '62.148.76.32', '45.3.243.155']
    for ip_address in ip_addresses:
        tags = ['ip_address:{}'.format(ip_address)] + common_tags
        aggregator.assert_metric('snmp.networkDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    # Intel Adapter
    interfaces = ['eth0', 'en1']
    for interface in interfaces:
        tags = ['adapter:{}'.format(interface)] + common_tags
        for count in ADAPTER_IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(count), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    # IDRAC
    indexes = ['26', '29']
    for index in indexes:
        tags = ['chassis_index:{}'.format(index)] + common_tags
        for gauge in SYSTEM_STATUS_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    powers = ['supply1', 'supply2']
    for power in powers:
        tags = ['supply_name:{}'.format(power)] + common_tags
        aggregator.assert_metric('snmp.enclosurePowerSupplyState', metric_type=aggregator.GAUGE, tags=tags, count=1)
    disks = ['disk1', 'disk2']
    for disk in disks:
        tags = ['disk_name:{}'.format(disk)] + common_tags
        for gauge in DISK_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # iDRAC gauges are only checked for presence (no tags/count constraints).
    for gauge in idrac_gauges:
        aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_hp_ilo4(aggregator):
    """Check the 'hp-ilo4' profile against the simulated HP iLO4 device.

    Verifies health/controller status gauges, health counters, temperature
    and battery sensors, network card and physical adapter counters, and
    drive array metrics, then checks full coverage and metadata types.
    """
    profile = "hp-ilo4"
    run_profile_check(profile)
    # Scalar status gauges from CPQHLTH/CPQNIC/CPQSM2 MIBs.
    status_gauges = [
        'cpqHeCritLogCondition',
        'cpqHeCorrMemLogStatus',
        'cpqHeCorrMemLogCondition',
        'cpqHeAsrStatus',
        'cpqHeAsrPost',
        'cpqHeAsrCondition',
        'cpqHeAsrNetworkAccessStatus',
        'cpqHeThermalCondition',
        'cpqHeThermalTempStatus',
        'cpqHeThermalSystemFanStatus',
        'cpqHeThermalCpuFanStatus',
        'cpqNicVtVirusActivity',
        'cpqSm2CntlrServerPowerState',
        'cpqSm2CntlrBatteryStatus',
        'cpqSm2CntlrRemoteSessionStatus',
        'cpqSm2CntlrInterfaceStatus',
    ]
    cpqhlth_counts = ['cpqHeAsrRebootCount', 'cpqHeCorrMemTotalErrs']
    cpqhlth_gauges = ['cpqHeSysUtilEisaBusMin', 'cpqHePowerMeterCurrReading', 'cpqHeSysUtilLifeTime']
    cpqsm2_gauges = [
        'cpqSm2CntlrBatteryPercentCharged',
        'cpqSm2CntlrSelfTestErrors',
        'cpqSm2EventTotalEntries',
    ]
    # cpqSm2NicStatsLocation enum values for the NIC stats table.
    EMBEDDED = 2
    PCMCIA = 3
    card_locations = [EMBEDDED, PCMCIA]
    network_card_counts = [
        'cpqSm2NicXmitBytes',
        'cpqSm2NicXmitTotalPackets',
        'cpqSm2NicXmitDiscardPackets',
        'cpqSm2NicXmitErrorPackets',
        'cpqSm2NicXmitQueueLength',
        'cpqSm2NicRecvBytes',
        'cpqSm2NicRecvTotalPackets',
        'cpqSm2NicRecvDiscardPackets',
        'cpqSm2NicRecvErrorPackets',
        'cpqSm2NicRecvUnknownPackets',
    ]
    interfaces = ['eth0', 'en1']
    phys_adapter_counts = [
        'cpqNicIfPhysAdapterGoodTransmits',
        'cpqNicIfPhysAdapterGoodReceives',
        'cpqNicIfPhysAdapterBadTransmits',
        'cpqNicIfPhysAdapterBadReceives',
        'cpqNicIfPhysAdapterInOctets',
        'cpqNicIfPhysAdapterOutOctets',
    ]
    phys_adapter_gauges = ['cpqNicIfPhysAdapterSpeed', 'cpqNicIfPhysAdapterSpeedMbps']
    temperature_sensors = [1, 13, 28]
    batteries = [1, 3, 4, 5]
    common_tags = common.CHECK_TAGS + ['snmp_profile:' + profile, 'device_vendor:hp']
    common.assert_common_metrics(aggregator, common_tags)
    for metric in status_gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in cpqhlth_counts:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in cpqhlth_gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in cpqsm2_gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    # Per-sensor temperature readings and condition.
    for index in temperature_sensors:
        tags = ['temperature_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.cpqHeTemperatureCelsius', metric_type=aggregator.GAUGE, tags=tags, count=1)
        aggregator.assert_metric('snmp.cpqHeTemperatureCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Per-battery condition and status.
    for index in batteries:
        tags = ['battery_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.cpqHeSysBatteryCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
        aggregator.assert_metric('snmp.cpqHeSysBatteryStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
    # NIC statistics, one batch per card location (embedded / PCMCIA).
    for location in card_locations:
        tags = ['nic_stats_location:{}'.format(location)] + common_tags
        for metric in network_card_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    # Physical adapter counters and gauges per interface.
    for interface in interfaces:
        tags = ['interface:{}'.format(interface)] + common_tags
        for metric in phys_adapter_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in phys_adapter_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Drive array physical drive metrics (CPQIDA MIB).
    drive_counts = [
        "cpqDaPhyDrvUsedReallocs",
        "cpqDaPhyDrvRefHours",
        "cpqDaPhyDrvHardReadErrs",
        "cpqDaPhyDrvRecvReadErrs",
        "cpqDaPhyDrvHardWriteErrs",
        "cpqDaPhyDrvRecvWriteErrs",
        "cpqDaPhyDrvHSeekErrs",
        "cpqDaPhyDrvSeekErrs",
    ]
    drive_gauges = [
        "cpqDaPhyDrvStatus",
        "cpqDaPhyDrvFactReallocs",
        "cpqDaPhyDrvSpinupTime",
        "cpqDaPhyDrvSize",
        "cpqDaPhyDrvSmartStatus",
        "cpqDaPhyDrvCurrentTemperature",
    ]
    # (controller index, drive index) pairs present in the fixture.
    drive_idx = [(0, 2), (0, 28), (8, 31), (9, 24), (9, 28), (10, 17), (11, 4), (12, 20), (18, 22), (23, 2)]
    for drive_cntrl_idx, drive_index in drive_idx:
        tags = ['drive_cntrl_idx:{}'.format(drive_cntrl_idx), "drive_index:{}".format(drive_index)] + common_tags
        for metric in drive_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in drive_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_all_metrics_covered()
    # Also validate submitted metric types against the profile metadata.
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_proliant(aggregator):
    """Verify every metric submitted by the 'hpe-proliant' SNMP profile.

    Runs the profile against the simulated device, then asserts the generic
    TCP/UDP metrics plus the ProLiant-specific CPU, filesystem, memory,
    drive, interface, power and thermal series, each with its expected
    metric type, tags and submission count.
    """
    run_profile_check('hpe-proliant')
    common_tags = common.CHECK_TAGS + ['snmp_profile:hpe-proliant', 'device_vendor:hp']
    common.assert_common_metrics(aggregator, common_tags)
    # Generic TCP/UDP metrics shared with other profiles.
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    # Per-CPU gauges; indexes match the rows present in the simulation data.
    cpu_gauges = [
        "cpqSeCpuSlot",
        "cpqSeCpuSpeed",
        "cpqSeCpuStatus",
        "cpqSeCpuExtSpeed",
        "cpqSeCpuCore",
        "cpqSeCPUCoreMaxThreads",
        "cpqSeCpuPrimary",
    ]
    cpu_indexes = [0, 4, 6, 8, 13, 15, 26, 27]
    for idx in cpu_indexes:
        tags = ['cpu_index:{}'.format(idx)] + common_tags
        for metric in cpu_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # CPU utilization averaged over several time windows, per cpu unit.
    cpu_util_gauges = ["cpqHoCpuUtilMin", "cpqHoCpuUtilFiveMin", "cpqHoCpuUtilThirtyMin", "cpqHoCpuUtilHour"]
    cpu_unit_idx = [4, 7, 13, 20, 22, 23, 29]
    for idx in cpu_unit_idx:
        tags = ['cpu_unit_index:{}'.format(idx)] + common_tags
        for metric in cpu_util_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Filesystem capacity/usage gauges, one series per file_sys_index.
    file_sys_gauges = [
        "cpqHoFileSysSpaceTotal",
        "cpqHoFileSysSpaceUsed",
        "cpqHoFileSysPercentSpaceUsed",
        "cpqHoFileSysAllocUnitsTotal",
        "cpqHoFileSysAllocUnitsUsed",
        "cpqHoFileSysStatus",
    ]
    file_sys_idx = [5, 8, 11, 15, 19, 21, 28, 30]
    for idx in file_sys_idx:
        tags = ['file_sys_index:{}'.format(idx)] + common_tags
        for metric in file_sys_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Memory module gauges, keyed by (board index, module index) pairs.
    memory_gauges = [
        "cpqSiMemModuleSize",
        "cpqSiMemModuleType",
        "cpqSiMemModuleSpeed",
        "cpqSiMemModuleTechnology",
        "cpqSiMemModuleECCStatus",
        "cpqSiMemModuleFrequency",
        "cpqSiMemModuleCellStatus",
    ]
    memory_idx = [(6, 16), (7, 17), (7, 30), (8, 20), (10, 4), (15, 27), (20, 14), (21, 14), (23, 0), (28, 20)]
    for board_idx, mem_module_index in memory_idx:
        tags = ['mem_board_index:{}'.format(board_idx), "mem_module_index:{}".format(mem_module_index)] + common_tags
        for metric in memory_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Physical drive error counters (monotonic) and status gauges,
    # keyed by (controller index, drive index) pairs.
    drive_counts = [
        "cpqDaPhyDrvUsedReallocs",
        "cpqDaPhyDrvRefHours",
        "cpqDaPhyDrvHardReadErrs",
        "cpqDaPhyDrvRecvReadErrs",
        "cpqDaPhyDrvHardWriteErrs",
        "cpqDaPhyDrvRecvWriteErrs",
        "cpqDaPhyDrvHSeekErrs",
        "cpqDaPhyDrvSeekErrs",
    ]
    drive_gauges = [
        "cpqDaPhyDrvStatus",
        "cpqDaPhyDrvFactReallocs",
        "cpqDaPhyDrvSpinupTime",
        "cpqDaPhyDrvSize",
        "cpqDaPhyDrvSmartStatus",
        "cpqDaPhyDrvCurrentTemperature",
    ]
    drive_idx = [(0, 2), (0, 28), (8, 31), (9, 24), (9, 28), (10, 17), (11, 4), (12, 20), (18, 22), (23, 2)]
    for drive_cntrl_idx, drive_index in drive_idx:
        tags = ['drive_cntrl_idx:{}'.format(drive_cntrl_idx), "drive_index:{}".format(drive_index)] + common_tags
        for metric in drive_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in drive_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Standard interface metrics; each interface also carries its alias tag.
    interfaces = [
        ('eth0', 'quaintly zombies quaintly forward'),
        ('eth1', 'quaintly but quaintly quaintly'),
    ]
    for interface, desc in interfaces:
        if_tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(desc)] + common_tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
            )
        for metric in IF_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
        for metric in IF_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
        for metric in IF_BANDWIDTH_USAGE:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
    # Memory board condition, network adapter, power-supply, controller and
    # thermal gauges from the fixture.
    mem_boards = ['11', '12']
    for board in mem_boards:
        tags = ['mem_board_index:{}'.format(board)] + common_tags
        aggregator.assert_metric('snmp.cpqHeResMem2ModuleCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
    adapter_gauges = ['cpqNicIfPhysAdapterStatus', 'cpqNicIfPhysAdapterState']
    for gauge in adapter_gauges:
        tags = ['adapter_name:adapter', 'adapter_mac_addr:mac'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    power_metrics = [
        'cpqHeFltTolPowerSupplyStatus',
        'cpqHeFltTolPowerSupplyCapacityUsed',
        'cpqHeFltTolPowerSupplyCapacityMaximum',
    ]
    for gauge in power_metrics:
        tags = ['chassis_num:30'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    controller_index = ['controller_index:3'] + common_tags
    aggregator.assert_metric(
        'snmp.{}'.format("cpqDaCntlrCondition"), metric_type=aggregator.GAUGE, tags=controller_index, count=1
    )
    thermal_metrics = ['cpqHeThermalCondition', 'cpqHeSysUtilLifeTime', 'cpqHeFltTolPwrSupplyStatus']
    for metric in thermal_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    # Fail if the profile submitted anything not asserted above, and check
    # submitted metrics against the declared metadata.
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_generic_host_resources(aggregator):
    """Check metric coverage of the generic host-resources profile definition."""
    instance = common.generate_instance_config([])
    instance.update(
        {
            'community_string': 'generic_host',
            'enforce_mib_constraints': False,
            'profile': 'generic',
        }
    )
    init_config = {'profiles': {'generic': {'definition_file': '_generic-host-resources.yaml'}}}
    check = SnmpCheck('snmp', init_config, [instance])
    check.check(instance)
    common_tags = common.CHECK_TAGS + ['snmp_profile:generic']
    common.assert_common_metrics(aggregator, common_tags)
    # System-wide gauges are submitted exactly once with the common tags.
    for metric in (
        'snmp.hrSystemUptime',
        'snmp.hrSystemNumUsers',
        'snmp.hrSystemProcesses',
        'snmp.hrSystemMaxProcesses',
    ):
        aggregator.assert_metric(metric, metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    # Storage and processor table metrics come from fixtures with two rows each.
    for metric in (
        'snmp.hrStorageAllocationUnits',
        'snmp.hrStorageSize',
        'snmp.hrStorageUsed',
        'snmp.hrStorageAllocationFailures',
        'snmp.hrProcessorLoad',
    ):
        aggregator.assert_metric(metric, count=2)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_palo_alto(aggregator):
    """Verify the metrics submitted by the 'palo-alto' SNMP profile."""
    profile = "palo-alto"
    run_profile_check(profile)
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:' + profile,
        'device_vendor:paloaltonetworks',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    session = [
        'panSessionUtilization',
        'panSessionMax',
        'panSessionActive',
        'panSessionActiveTcp',
        'panSessionActiveUdp',
        'panSessionActiveICMP',
        'panSessionActiveSslProxy',
        'panSessionSslProxyUtilization',
    ]
    global_protect = [
        'panGPGWUtilizationPct',
        'panGPGWUtilizationMaxTunnels',
        'panGPGWUtilizationActiveTunnels',
    ]
    entity = [
        'panEntityTotalPowerAvail',
        'panEntityTotalPowerUsed',
    ]
    # Session, GlobalProtect gateway and entity metrics are all plain gauges
    # submitted exactly once with the common tags.
    for metric in session + global_protect + entity:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    # Needs cross table entPhysicalIsFRU tag
    for metric in ('panEntryFRUModulePowerUsed', 'panEntryFRUModuleNumPorts'):
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
    # Needs cross table entLogicalDescr tag
    aggregator.assert_metric('snmp.panEntryFanTrayPowerUsed', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_asa_all(aggregator):
    """Run the shared Cisco ASA assertions against the generic 'cisco-asa' profile."""
    assert_cisco_asa(aggregator, "cisco-asa")
@pytest.mark.usefixtures("dd_environment")
def test_cisco_asa_5525(aggregator):
    """Run the shared Cisco ASA assertions against the 'cisco-asa-5525' profile."""
    assert_cisco_asa(aggregator, "cisco-asa-5525")
def assert_cisco_asa(aggregator, profile):
    """Shared assertions for the Cisco ASA family of profiles.

    Runs `profile` against the simulated device and verifies every metric it
    is expected to submit: generic TCP/UDP and interface metrics, FRU/CPU/
    sensor gauges, firewall and VPN statistics, environmental monitors,
    chassis/stack state and RTT (IP SLA) probe metrics.
    """
    run_profile_check(profile)
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:' + profile,
        'snmp_host:kept',
        'device_vendor:cisco',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # Generic TCP/UDP metrics shared with other profiles.
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    # Standard interface metrics; the fixture exposes a single interface.
    if_tags = ['interface:eth0'] + common_tags
    for metric in IF_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
        )
    for metric in IF_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
    for metric in IF_RATES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
    for metric in IF_BANDWIDTH_USAGE:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
    aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1)
    # Field-replaceable unit and CPU gauges, one series per index.
    frus = [3, 4, 5, 7, 16, 17, 24, 25]
    for fru in frus:
        tags = ['fru:{}'.format(fru)] + common_tags
        for metric in FRU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    cpus = [7746]
    for cpu in cpus:
        tags = ['cpu:{}'.format(cpu)] + common_tags
        for metric in CPU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    sensor_tags = ['sensor_id:31', 'sensor_type:9'] + common_tags
    aggregator.assert_metric('snmp.entPhySensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
    # Firewall connection statistics, keyed by (service, stat) type pairs.
    stat_tags = [(20, 2), (5, 5)]
    for (svc, stat) in stat_tags:
        aggregator.assert_metric(
            'snmp.cfwConnectionStatValue',
            metric_type=aggregator.GAUGE,
            tags=['stat_type:{}'.format(stat), 'service_type:{}'.format(svc)] + common_tags,
        )
    # Remote-access VPN session metrics.
    aggregator.assert_metric('snmp.crasNumDeclinedSessions', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.crasNumSessions', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.crasNumUsers', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric(
        'snmp.crasNumSetupFailInsufResources', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags
    )
    # Global IPsec tunnel metrics.
    aggregator.assert_metric('snmp.cipSecGlobalActiveTunnels', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.cipSecGlobalHcInOctets', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
    aggregator.assert_metric('snmp.cipSecGlobalHcOutOctets', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
    # Environmental monitors: temperature, power supply and fan state.
    for (index, state) in [(3, 3), (6, 6), (8, 6), (11, 6), (13, 3), (14, 6), (20, 6), (21, 4), (31, 5)]:
        aggregator.assert_metric(
            'snmp.ciscoEnvMonTemperatureStatusValue',
            metric_type=aggregator.GAUGE,
            tags=['temp_state:{}'.format(state), 'temp_index:{}'.format(index)] + common_tags,
        )
    power_supply_tags = ['power_source:1', 'power_status_descr:Jaded driving their their their'] + common_tags
    aggregator.assert_metric('snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=power_supply_tags)
    fan_indices = [4, 6, 7, 16, 21, 22, 25, 27]
    for index in fan_indices:
        tags = ['fan_status_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
    aggregator.assert_metric(
        'snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=['mac_addr:0xffffffffffff'] + common_tags
    )
    # Fan tray status; note this uses a different set of FRU ids than above.
    frus = [2, 7, 8, 21, 26, 27, 30, 31]
    for fru in frus:
        aggregator.assert_metric(
            'snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=['fru:{}'.format(fru)] + common_tags
        )
    for metric in MEMORY_METRICS:
        tags = ['mem_pool_name:test_pool'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
    for conn in [1, 2, 5]:
        conn_tags = ['connection_type:{}'.format(conn)] + common_tags
        aggregator.assert_metric('snmp.cfwConnectionStatCount', metric_type=aggregator.RATE, tags=conn_tags)
    # Failover hardware status per (type, description) pair.
    hardware_tags = [(3, 'Secondary unit'), (5, 'Primary unit'), (6, 'Failover LAN Interface')]
    for (htype, hdesc) in hardware_tags:
        aggregator.assert_metric(
            'snmp.cfwHardwareStatusValue',
            metric_type=aggregator.GAUGE,
            tags=['hardware_type:{}'.format(htype), 'hardware_desc:{}'.format(hdesc)] + common_tags,
        )
    for switch in [4684, 4850, 8851, 9997, 15228, 16580, 24389, 30813, 36264]:
        aggregator.assert_metric(
            'snmp.cvsChassisUpTime',
            metric_type=aggregator.GAUGE,
            tags=['chassis_switch_id:{}'.format(switch)] + common_tags,
        )
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    # RTT (IP SLA) probes: index, type and state are parallel lists describing
    # one probe per position.
    rtt_indexes = [1, 7, 10, 13, 15, 18, 20]
    rtt_types = [22, 21, 17, 6, 20, 8, 16]
    rtt_states = [3, 1, 6, 4, 6, 1, 6]
    rtt_gauges = ['rttMonLatestRttOperCompletionTime', 'rttMonLatestRttOperSense', 'rttMonCtrlOperTimeoutOccurred']
    for rtt_index, rtt_type, rtt_state in zip(rtt_indexes, rtt_types, rtt_states):
        tags = [
            "rtt_index:{}".format(rtt_index),
            "rtt_type:{}".format(rtt_type),
            "rtt_state:{}".format(rtt_state),
        ] + common_tags
        for rtt in rtt_gauges:
            aggregator.assert_metric('snmp.{}'.format(rtt), metric_type=aggregator.GAUGE, tags=tags)
    # Fail if the profile submitted anything not asserted above.
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_csr(aggregator):
    """Verify the metrics submitted by the 'cisco-csr1000v' SNMP profile."""
    run_profile_check('cisco-csr1000v')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:cisco-csr1000v',
        'device_vendor:cisco',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # All BGP peer metrics carry the neighbor tag from the fixture.
    neighbor_tags = ['neighbor:244.12.239.177'] + common_tags
    for metric_group, metric_type in ((PEER_GAUGES, aggregator.GAUGE), (PEER_RATES, aggregator.RATE)):
        for metric in metric_group:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=neighbor_tags)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
@pytest.mark.usefixtures("dd_environment")
def test_checkpoint_firewall(aggregator):
    """Verify the metrics submitted by the 'checkpoint-firewall' SNMP profile.

    Covers per-core CPU usage, memory, per-disk and per-sensor appliance
    metrics, and the firewall packet/connection statistics.
    """
    run_profile_check('checkpoint-firewall')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:checkpoint-firewall',
        'device_vendor:checkpoint',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # Per-core CPU usage gauges; core ids come from the simulation data.
    cpu_metrics = [
        'multiProcUserTime',
        'multiProcSystemTime',
        'multiProcIdleTime',
        'multiProcUsage',
    ]
    cpu_cores = [7097, 13039, 13761, 28994, 29751, 33826, 40053, 48847, 61593, 65044]
    for core in cpu_cores:
        tags = ['cpu_core:{}'.format(core)] + common_tags
        for metric in cpu_metrics:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_metric('snmp.procNum', metric_type=aggregator.GAUGE, tags=common_tags)
    mem_metrics = ['memTotalReal64', 'memActiveReal64', 'memFreeReal64', 'memTotalVirtual64', 'memActiveVirtual64']
    for metric in mem_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
    disk_metrics = [
        'multiDiskSize',
        'multiDiskUsed',
        'multiDiskFreeTotalBytes',
        'multiDiskFreeAvailableBytes',
        'multiDiskFreeTotalPercent',
        'multiDiskFreeAvailablePercent',
    ]
    # NOTE: 'temperture' spelling matches the vendor MIB / fixture names.
    appliance_metrics = [
        'fanSpeedSensorValue',
        'fanSpeedSensorStatus',
        'tempertureSensorValue',
        'tempertureSensorStatus',
    ]
    # Disk and sensor rows share the same index/name pairs in the fixture;
    # enumerate the names instead of indexing a parallel range.
    common_names = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']
    for idx, name in enumerate(common_names):
        tags = ['disk_index:{}'.format(idx), 'disk_name:{}'.format(name)] + common_tags
        for metric in disk_metrics:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
        tags = ['sensor_index:{}'.format(idx), 'sensor_name:{}'.format(name)] + common_tags
        for metric in appliance_metrics:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
    fw_count_metrics = ['fwAccepted', 'fwDropped', 'fwRejected']
    for metric in fw_count_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
    fw_gauge_metrics = ['fwNumConn', 'fwPeakNumConn']
    for metric in fw_gauge_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_arista(aggregator):
    """Verify the metrics submitted by the 'arista' SNMP profile."""
    run_profile_check('arista')
    common_tags = common.CHECK_TAGS + ['snmp_profile:arista', 'device_vendor:arista']
    common.assert_common_metrics(aggregator, common_tags)
    # Per-queue packet-drop counters, keyed by (interface, queue) index pairs.
    queue_drops = [
        ('aristaEgressQueuePktsDropped', 13, 10),
        ('aristaEgressQueuePktsDropped', 28, 22),
        ('aristaIngressQueuePktsDropped', 7, 25),
        ('aristaIngressQueuePktsDropped', 8, 24),
    ]
    for metric, interface_index, queue_index in queue_drops:
        aggregator.assert_metric(
            'snmp.{}'.format(metric),
            metric_type=aggregator.MONOTONIC_COUNT,
            tags=common_tags + ['interface_index:{}'.format(interface_index), 'queue_index:{}'.format(queue_index)],
            count=1,
        )
    # Physical sensor value and operational status per (id, type) pair.
    for (sensor_id, sensor_type) in [(1, 11), (7, 8)]:
        sensor_tags = ['sensor_id:{}'.format(sensor_id), 'sensor_type:{}'.format(sensor_type)] + common_tags
        for sensor_metric in ('snmp.entPhySensorValue', 'snmp.entPhySensorOperStatus'):
            aggregator.assert_metric(sensor_metric, metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_aruba(aggregator):
    """Verify the metrics submitted by the 'aruba' SNMP profile."""
    run_profile_check('aruba')
    common_tags = common.CHECK_TAGS + ['snmp_profile:aruba', 'device_vendor:aruba']
    common.assert_common_metrics(aggregator, common_tags)
    # Hardware component status gauges, one series per indexed component.
    for index in [18, 28]:
        tags = common_tags + ['fan_index:{}'.format(index)]
        aggregator.assert_metric('snmp.sysExtFanStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
    for index in [1, 17]:
        tags = common_tags + ['powersupply_index:{}'.format(index)]
        aggregator.assert_metric('snmp.sysExtPowerSupplyStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
    for index in [11, 26]:
        tags = common_tags + ['processor_index:{}'.format(index)]
        aggregator.assert_metric('snmp.sysExtProcessorLoad', metric_type=aggregator.GAUGE, tags=tags, count=1)
    for index in [3, 20]:
        tags = common_tags + ['memory_index:{}'.format(index)]
        for memory_metric in ('sysExtMemorySize', 'sysExtMemoryUsed', 'sysExtMemoryFree'):
            aggregator.assert_metric('snmp.{}'.format(memory_metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_metric(
        'snmp.wlsxSysExtPacketLossPercent', metric_type=aggregator.GAUGE, tags=common_tags, count=1
    )
    # OSPF neighbor metrics for the single neighbor in the fixture.
    for metric, metric_type in [
        ('ospfNbrEvents', aggregator.RATE),
        ('ospfNbrState', aggregator.GAUGE),
        ('ospfNbrLsRetransQLen', aggregator.GAUGE),
    ]:
        tags = ['neighbor_ip:192.29.116.26', 'neighbor_id:192.29.66.79'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
    # OSPF virtual neighbor metrics, one series per (ip, neighbor id) pair.
    for metric, metric_type in [
        ('ospfVirtNbrState', aggregator.GAUGE),
        ('ospfVirtNbrEvents', aggregator.RATE),
        ('ospfVirtNbrLsRetransQLen', aggregator.GAUGE),
    ]:
        for ip, nbr in [('74.210.82.1', '194.154.66.112'), ('122.226.86.1', '184.201.101.140')]:
            tags = ['neighbor_ip:{}'.format(ip), 'neighbor_id:{}'.format(nbr)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
    # OSPF interface (link-state) metrics, tagged by interface IP and neighbor.
    for metric in ('ospfIfRetransInterval', 'ospfIfState', 'ospfIfLsaCount'):
        for ip, nbr in [('58.115.169.188', '192.29.66.79'), ('18.2.8.29', '118.246.193.247')]:
            tags = ['ospf_ip_addr:{}'.format(ip), 'neighbor_id:{}'.format(nbr)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # OSPF virtual interface metrics, tagged only by neighbor id.
    for metric in ('ospfVirtIfRetransInterval', 'ospfVirtIfState', 'ospfVirtIfLsaCount'):
        for nbr in ['194.154.66.112', '184.201.101.140']:
            tags = ['neighbor_id:{}'.format(nbr)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_chatsworth(aggregator):
    """Verify the metrics submitted by the 'chatsworth_pdu' SNMP profile.

    The profile exposes two generations of metrics: legacy PDU metrics
    (tagged with the legacy_pdu_* global tags) and newer cpiPdu* metrics
    (tagged per PDU, lock, sensor, line, branch and outlet).
    """
    profile = "chatsworth_pdu"
    run_profile_check(profile)
    # Legacy global tags are applied to all metrics
    legacy_global_tags = [
        'legacy_pdu_macaddress:00:0E:D3:AA:CC:EE',
        'legacy_pdu_model:P10-1234-ABC',
        'legacy_pdu_name:legacy-name1',
        'legacy_pdu_version:1.2.3',
    ]
    common_tags = common.CHECK_TAGS + legacy_global_tags + ['snmp_profile:' + profile, 'device_vendor:chatsworth']
    common.assert_common_metrics(aggregator, common_tags)
    # Legacy metrics
    legacy_pdu_tags = common_tags
    legacy_pdu_gauge_metrics = [
        'snmp.pduRole',
        'snmp.outOfService',
    ]
    legacy_pdu_monotonic_count_metrics = []
    # Build the expected legacy metric names: 3 line currents, and per-branch
    # temperature/humidity probes plus xy/yz/zx phase energy/voltage/power
    # series, then 24 per-outlet energy/current series.
    for line in range(1, 4):
        legacy_pdu_gauge_metrics.append('snmp.line{}curr'.format(line))
    for branch in range(1, 3):
        legacy_pdu_gauge_metrics.append('snmp.temperatureProbe{}'.format(branch))
        legacy_pdu_gauge_metrics.append('snmp.humidityProbe{}'.format(branch))
        for xyz in ['xy', 'yz', 'zx']:
            legacy_pdu_monotonic_count_metrics.append('snmp.energy{}{}s'.format(xyz, branch))
            legacy_pdu_gauge_metrics.append('snmp.voltage{}{}'.format(xyz, branch))
            legacy_pdu_gauge_metrics.append('snmp.power{}{}'.format(xyz, branch))
            legacy_pdu_gauge_metrics.append('snmp.powerFact{}{}'.format(xyz, branch))
            legacy_pdu_gauge_metrics.append('snmp.current{}{}'.format(xyz, branch))
    for branch in range(1, 25):
        legacy_pdu_monotonic_count_metrics.append('snmp.receptacleEnergyoutlet{}s'.format(branch))
        legacy_pdu_gauge_metrics.append('snmp.outlet{}Current'.format(branch))
    for metric in legacy_pdu_gauge_metrics:
        aggregator.assert_metric(metric, metric_type=aggregator.GAUGE, tags=legacy_pdu_tags, count=1)
    for metric in legacy_pdu_monotonic_count_metrics:
        aggregator.assert_metric(metric, metric_type=aggregator.MONOTONIC_COUNT, tags=legacy_pdu_tags, count=1)
    # New metrics
    pdu_tags = common_tags + [
        'pdu_cabinetid:cab1',
        'pdu_ipaddress:42.2.210.224',
        'pdu_macaddress:0x00249b3503f6',
        'pdu_model:model1',
        'pdu_name:name1',
        'pdu_version:v1.1',
    ]
    aggregator.assert_metric('snmp.cpiPduNumberBranches', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduNumberOutlets', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduOutOfService', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduUpgrade', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduChainRole', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduTotalPower', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    # Electronic access / door / lock status per lock id.
    for lock in [1, 2]:
        lock_tags = common_tags + ['lock_id:{}'.format(lock)]
        aggregator.assert_metric('snmp.cpiPduEasStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduDoorStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduLockStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
    for (sensor_name, sensor_index) in [('sensor1', 4), ('sensor2', 6)]:
        sensor_tags = common_tags + [
            'sensor_index:{}'.format(sensor_index),
            'sensor_name:{}'.format(sensor_name),
            'sensor_type:1',
        ]
        aggregator.assert_metric('snmp.cpiPduSensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
    for line in [6, 18]:
        line_tags = common_tags + ['line_id:{}'.format(line)]
        aggregator.assert_metric('snmp.cpiPduLineCurrent', metric_type=aggregator.GAUGE, tags=line_tags, count=1)
    # Branch metrics: pdu 'name1' reports the full set, 'name2' only a subset.
    for branch in [1, 17]:
        branch_tags = common_tags + ['branch_id:{}'.format(branch), 'pdu_name:name1']
        aggregator.assert_metric('snmp.cpiPduBranchCurrent', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduBranchMaxCurrent', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduBranchVoltage', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduBranchPower', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric(
            'snmp.cpiPduBranchPowerFactor', metric_type=aggregator.GAUGE, tags=branch_tags, count=1
        )
        aggregator.assert_metric('snmp.cpiPduBranchStatus', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric(
            'snmp.cpiPduBranchEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=branch_tags, count=1
        )
    for branch in [1]:
        branch_tags = common_tags + ['branch_id:{}'.format(branch), 'pdu_name:name2']
        aggregator.assert_metric(
            'snmp.cpiPduBranchPowerFactor', metric_type=aggregator.GAUGE, tags=branch_tags, count=1
        )
        aggregator.assert_metric(
            'snmp.cpiPduBranchEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=branch_tags, count=1
        )
    # Per-outlet metrics, keyed by (outlet id, branch id, name).
    for (outlet_id, outlet_branch, outlet_name) in [(7, 29, 'outlet1'), (16, 23, 'outlet2')]:
        outlet_tags = common_tags + [
            'outlet_id:{}'.format(outlet_id),
            'outlet_branchid:{}'.format(outlet_branch),
            'outlet_name:{}'.format(outlet_name),
        ]
        aggregator.assert_metric('snmp.cpiPduOutletCurrent', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduOutletVoltage', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduOutletPower', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduOutletStatus', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
        aggregator.assert_metric(
            'snmp.cpiPduOutletEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=outlet_tags, count=1
        )
    # Fail if the profile submitted anything not asserted above, and check
    # submitted metrics against the declared metadata.
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_isilon(aggregator):
    """Verify the metrics submitted by the 'isilon' SNMP profile.

    Covers cluster/node health and throughput, per-protocol latency,
    per-quota usage, fan speeds and per-disk metrics.
    """
    run_profile_check('isilon')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:isilon',
        'cluster_name:testcluster1',
        'node_name:node1',
        'node_type:1',
        'device_vendor:dell',
    ]
    cluster_rates = [
        'clusterIfsInBytes',
        'clusterIfsOutBytes',
    ]
    node_rates = [
        'nodeIfsOutBytes',
        'nodeIfsInBytes',
    ]
    protocol_metrics = [
        'protocolOpsPerSecond',
        'latencyMin',
        'latencyMax',
        'latencyAverage',
    ]
    quota_metrics = ['quotaHardThreshold', 'quotaSoftThreshold', 'quotaUsage', 'quotaAdvisoryThreshold']
    # (quota id, quota type) pairs present in the simulation data.
    quota_ids_types = [
        (422978632, 1),
        (153533730, 5),
        (3299369987, 4),
        (2149993012, 3),
        (1424325378, 1),
        (4245321451, 0),
        (2328145711, 1),
        (1198032230, 4),
        (1232918362, 1),
        (1383990869, 1),
    ]
    common.assert_common_metrics(aggregator, common_tags)
    for metric in quota_metrics:
        for qid, qtype in quota_ids_types:
            tags = ['quota_id:{}'.format(qid), 'quota_type:{}'.format(qtype)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    for metric in protocol_metrics:
        for num in range(1, 3):
            tags = ['protocol_name:testprotocol{}'.format(num)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_metric('snmp.clusterHealth', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in cluster_rates:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=common_tags, count=1)
    aggregator.assert_metric('snmp.nodeHealth', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in node_rates:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=common_tags, count=1)
    for fan in [4, 6, 10, 11, 14, 21, 22, 23, 25, 30]:
        tags = ['fan_name:testfan', 'fan_number:{}'.format(fan)] + common_tags
        aggregator.assert_metric('snmp.fanSpeed', metric_type=aggregator.GAUGE, tags=tags, count=1)
    for status, bay in [('SMARTFAIL', 1), ('HEALTHY', 2), ('DEAD', 3)]:
        tags = common_tags + ['disk_status:{}'.format(status), 'disk_bay:{}'.format((bay))]
        aggregator.assert_metric('snmp.diskSizeBytes', metric_type=aggregator.RATE, tags=tags)
    aggregator.assert_metric('snmp.ifsUsedBytes', metric_type=aggregator.RATE, tags=common_tags, count=1)
    aggregator.assert_metric('snmp.ifsTotalBytes', metric_type=aggregator.RATE, tags=common_tags, count=1)
    # NOTE(review): unlike the other profile tests in this file, this one does
    # not call aggregator.assert_all_metrics_covered() — confirm whether that
    # omission is intentional.
@pytest.mark.usefixtures("dd_environment")
def test_apc_ups(aggregator):
    """Verify the metrics submitted by the 'apc_ups' SNMP profile.

    Device identity (model, firmware, serial, name) is extracted into tags
    applied to every metric. The upsBasicStateOutputState bit flags are
    submitted as individual 0/1 gauges with a per-flag metric suffix.
    """
    run_profile_check('apc_ups')
    profile_tags = [
        'snmp_profile:apc_ups',
        'model:APC Smart-UPS 600',
        'firmware_version:2.0.3-test',
        'serial_num:test_serial',
        'ups_name:testIdentName',
        'device_vendor:apc',
    ]
    tags = common.CHECK_TAGS + profile_tags
    metrics = [
        'upsAdvBatteryNumOfBadBattPacks',
        'upsAdvBatteryReplaceIndicator',
        'upsAdvBatteryRunTimeRemaining',
        'upsAdvBatteryTemperature',
        'upsAdvBatteryCapacity',
        'upsHighPrecInputFrequency',
        'upsHighPrecInputLineVoltage',
        'upsHighPrecOutputCurrent',
        'upsAdvInputLineFailCause',
        'upsAdvOutputLoad',
        'upsBasicBatteryTimeOnBattery',
        'upsAdvTestDiagnosticsResults',
    ]
    common.assert_common_metrics(aggregator, tags)
    for metric in metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_metric(
        'snmp.upsOutletGroupStatusGroupState',
        metric_type=aggregator.GAUGE,
        tags=['outlet_group_name:test_outlet'] + tags,
    )
    # Output-state flags: the asserted value (second positional argument) is
    # the expected 0/1 state of each flag in the fixture.
    aggregator.assert_metric(
        'snmp.upsBasicStateOutputState.AVRTrimActive', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
    )
    aggregator.assert_metric(
        'snmp.upsBasicStateOutputState.BatteriesDischarged', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
    )
    aggregator.assert_metric(
        'snmp.upsBasicStateOutputState.LowBatteryOnBattery', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
    )
    aggregator.assert_metric(
        'snmp.upsBasicStateOutputState.NoBatteriesAttached', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
    )
    aggregator.assert_metric(
        'snmp.upsBasicStateOutputState.OnLine', 0, metric_type=aggregator.GAUGE, tags=tags, count=1
    )
    aggregator.assert_metric(
        'snmp.upsBasicStateOutputState.ReplaceBattery', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
    )
    aggregator.assert_metric('snmp.upsBasicStateOutputState.On', 1, metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_fortinet_fortigate(aggregator):
    """Fortinet FortiGate profile: system, per-processor, virtual-domain and firewall-policy metrics."""
    run_profile_check('fortinet-fortigate')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:fortinet-fortigate',
        'device_vendor:fortinet',
    ]
    common_gauge_metrics = [
        'fgSysCpuUsage',
        'fgSysMemUsage',
        'fgSysMemCapacity',
        'fgSysLowMemUsage',
        'fgSysLowMemCapacity',
        'fgSysDiskUsage',
        'fgSysDiskCapacity',
        'fgSysSesCount',
        'fgSysSesRate1',
        'fgSysSes6Count',
        'fgSysSes6Rate1',
        'fgApHTTPConnections',
        'fgApHTTPMaxConnections',
        'fgVdNumber',
        'fgVdMaxVdoms',
    ]
    processor_gauge_metrics = [
        'fgProcessorUsage',
        'fgProcessorSysUsage',
    ]
    processor_count_metrics = [
        'fgProcessorPktRxCount',
        'fgProcessorPktTxCount',
        'fgProcessorPktDroppedCount',
    ]
    processor_tags = common_tags + ['processor_index:12']
    vd_metrics = [
        'fgVdEntOpMode',
        'fgVdEntHaState',
        'fgVdEntCpuUsage',
        'fgVdEntMemUsage',
        'fgVdEntSesCount',
        'fgVdEntSesRate',
    ]
    vd_tags = common_tags + ['virtualdomain_index:4', 'virtualdomain_name:their oxen quaintly']
    common.assert_common_metrics(aggregator, common_tags)
    for metric in common_gauge_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in processor_gauge_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=processor_tags, count=1)
    # Counter OIDs are submitted twice: as monotonic counts and as precomputed `.rate` metrics.
    for metric in processor_count_metrics:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=processor_tags, count=1
        )
        aggregator.assert_metric(
            'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=processor_tags, count=1
        )
    for metric in vd_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=vd_tags, count=1)
    # Interface
    aggregator.assert_metric('snmp.fgIntfEntVdom', metric_type=aggregator.GAUGE, count=1)
    # Firewall
    firewall_tags = common_tags + ['policy_index:22']
    for metric in ['fgFwPolPktCount', 'fgFwPolByteCount']:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=firewall_tags, count=1
        )
        aggregator.assert_metric(
            'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=firewall_tags, count=1
        )
    # Firewall 6
    firewall6_tags = common_tags + ['policy6_index:29']
    for metric in ['fgFwPol6PktCount', 'fgFwPol6ByteCount']:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=firewall6_tags, count=1
        )
        aggregator.assert_metric(
            'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=firewall6_tags, count=1
        )
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_netapp(aggregator):
    """NetApp profile: system gauges/counts plus snapvault, snapmirror, filesystem and interface tables."""
    run_profile_check('netapp')
    profile_tags = [
        'snmp_profile:netapp',
        'snmp_host:example-datacenter.company',
        'device_vendor:netapp',
    ]
    common_tags = common.CHECK_TAGS + profile_tags
    common.assert_common_metrics(aggregator, common_tags)
    gauges = [
        'cfInterconnectStatus',
        'miscCacheAge',
        'ncHttpActiveCliConns',
    ]
    counts = [
        'extcache64Hits',
    ]
    for metric in gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in counts:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    snapvault_counts = [
        'svTotalFailures',
    ]
    # Each snapvault row is (index, destination, state) and produces its own tag set.
    snapvaults = [('5', '/vol/dir1', '5'), ('6', '/vol/dir3', '2'), ('18', '/vol/dir9', '4')]
    for metric in snapvault_counts:
        for index, destination, state in snapvaults:
            tags = [
                'index:{}'.format(index),
                'destination:{}'.format(destination),
                'state:{}'.format(state),
            ] + common_tags
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    snapmirrors = [('6', '1'), ('9', '5'), ('29', '1')]
    snapmirror_gauges = [
        'snapmirrorLag',
    ]
    snapmirror_counts = [
        'snapmirrorTotalFailures',
    ]
    for index, state in snapmirrors:
        tags = ['index:{}'.format(index), 'state:{}'.format(state)] + common_tags
        for metric in snapmirror_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        for metric in snapmirror_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    filesystem_gauges = [
        'dfHighTotalKBytes',
        'dfHighAvailKBytes',
        'dfInodesUsed',
        'dfInodesFree',
    ]
    filesystem_indexes = [
        '1022',
        '1023',
        '1024',
        '1025',
        '1026',
        '1027',
        '1028',
        '1029',
        '1032',
        '1033',
    ]
    # Filesystem names in the recording are sequential (/vol/dir1 ... /vol/dirN), one per index.
    filesystems = ['/vol/dir{}'.format(n) for n in range(1, len(filesystem_indexes) + 1)]
    for metric in filesystem_gauges:
        for index, filesystem in zip(filesystem_indexes, filesystems):
            tags = ['index:{}'.format(index), 'filesystem:{}'.format(filesystem)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    if_counts = [
        'ifHighInOctets',
    ]
    if_rates = [
        'ifHighInOctets.rate',
    ]
    interfaces = [
        # Interface descriptions will be normalized in the backend, but we receive the raw DisplayString values here.
        ('6', 'netgear ifX300 v1'),
        ('7', 'junyper proto12 12.3'),
        ('23', 'malabar yz42 10.2020'),
    ]
    for index, descr in interfaces:
        tags = ['index:{}'.format(index), 'interface:{}'.format(descr)] + common_tags
        for metric in if_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in if_rates:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_catalyst(aggregator):
    """Cisco Catalyst 6000 profile: sensor gauges, CIE interface gauges and FRU gauges."""
    run_profile_check('cisco-catalyst')
    base_tags = common.CHECK_TAGS + [
        'snmp_host:catalyst-6000.example',
        'snmp_profile:cisco-catalyst',
        'device_vendor:cisco',
    ]
    # One entSensorValue gauge per sensor row in the recording.
    for sensor_id in (5, 9):
        sensor_tags = ['sensor_id:{}'.format(sensor_id), 'sensor_type:10'] + base_tags
        aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
    # CIE gauges on a subset of GigabitEthernet ports.
    for port in (6, 10, 12, 18, 22, 25, 27):
        if_tags = ['interface:Gi1/0/{}'.format(port)] + base_tags
        for cie_metric in CIE_METRICS:
            aggregator.assert_metric('snmp.{}'.format(cie_metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
    for fru_id in (1001, 1010, 2001, 2010):
        fru_tags = ['fru:{}'.format(fru_id)] + base_tags
        for fru_metric in FRU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(fru_metric), metric_type=aggregator.GAUGE, tags=fru_tags, count=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_metric('snmp.devices_monitored', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_juniper_ex(aggregator):
    """Juniper EX switch profile: virtual-chassis, DCU, COS and firewall metric groups."""
    run_profile_check('juniper-ex')
    expected_tags = common.CHECK_TAGS + [
        'snmp_profile:juniper-ex',
        'device_vendor:juniper-networks',
    ]
    for check_group in (
        _check_juniper_virtual_chassis,
        _check_juniper_dcu,
        _check_juniper_cos,
        _check_juniper_firewall,
    ):
        check_group(aggregator, expected_tags)
    aggregator.assert_metric('snmp.devices_monitored', count=1)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_juniper_mx(aggregator):
    """Juniper MX router profile: virtual-chassis and firewall metric groups."""
    run_profile_check('juniper-mx')
    expected_tags = common.CHECK_TAGS + [
        'snmp_profile:juniper-mx',
        'device_vendor:juniper-networks',
    ]
    for check_group in (_check_juniper_virtual_chassis, _check_juniper_firewall):
        check_group(aggregator, expected_tags)
    aggregator.assert_metric('snmp.devices_monitored', count=1)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_juniper_srx(aggregator):
    """Juniper SRX firewall profile: user-firewall, DCU and SCU metric groups."""
    run_profile_check('juniper-srx')
    expected_tags = common.CHECK_TAGS + [
        'snmp_profile:juniper-srx',
        'device_vendor:juniper-networks',
    ]
    for check_group in (_check_juniper_userfirewall, _check_juniper_dcu, _check_juniper_scu):
        check_group(aggregator, expected_tags)
    aggregator.assert_metric('snmp.devices_monitored', count=1)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
def _check_juniper_scu(aggregator, common_tags):
    """Assert SCU (source-class usage) monotonic counts shared by Juniper profiles.

    Each SCU metric must appear exactly once per (address_family, interface) row.
    """
    scu_rows = (
        ['address_family:1', 'interface:kept but'],
        ['address_family:1', 'interface:quaintly driving oxen their zombies oxen acted acted'],
        ['address_family:1', 'interface:but forward kept but their driving oxen quaintly acted'],
    )
    for row_tags in scu_rows:
        for scu_metric in SCU_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(scu_metric),
                metric_type=aggregator.MONOTONIC_COUNT,
                tags=common_tags + row_tags,
                count=1,
            )
def _check_juniper_userfirewall(aggregator, common_tags):
    """Assert user-firewall (LDAP user auth) monotonic counts shared by Juniper profiles."""
    ldap_rows = (
        ['ldap_domain_name:Mycroft Holmes', 'ldap_host:brother'],
        ['ldap_domain_name:Jim Moriarty', 'ldap_host:enemy'],
    )
    for row_tags in ldap_rows:
        for fw_metric in USER_FIREWALL:
            aggregator.assert_metric(
                'snmp.{}'.format(fw_metric),
                metric_type=aggregator.MONOTONIC_COUNT,
                tags=common_tags + row_tags,
                count=1,
            )
def _check_juniper_dcu(aggregator, common_tags):
    """Assert DCU (destination-class usage) monotonic counts shared by Juniper profiles."""
    dcu_rows = (
        [
            'address_family:1',
            'destination_class_name:their',
            'interface:quaintly driving oxen their zombies oxen acted acted',
        ],
        [
            'address_family:1',
            'destination_class_name:acted but forward acted zombies forward',
            'interface:but forward kept but their driving oxen quaintly acted',
        ],
        [
            'address_family:2',
            'destination_class_name:oxen Jaded oxen Jaded forward kept quaintly',
            'interface:kept but',
        ],
    )
    for row_tags in dcu_rows:
        for dcu_metric in DCU_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(dcu_metric),
                metric_type=aggregator.MONOTONIC_COUNT,
                tags=common_tags + row_tags,
                count=1,
            )
def _check_juniper_firewall(aggregator, common_tags):
    """Assert firewall filter/counter monotonic counts shared by Juniper profiles."""
    counter_rows = (
        [
            'counter_name:Jaded oxen kept their driving but kept',
            'counter_type:4',
            'firewall_filter_name:their driving quaintly but Jaded oxen',
        ],
        [
            'counter_name:but but but their their their kept kept forward',
            'counter_type:4',
            'firewall_filter_name:driving kept acted Jaded zombies kept acted',
        ],
    )
    for row_tags in counter_rows:
        for fw_metric in FIREWALL_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(fw_metric),
                metric_type=aggregator.MONOTONIC_COUNT,
                tags=common_tags + row_tags,
                count=1,
            )
def _check_juniper_virtual_chassis(aggregator, common_tags):
    """Assert virtual-chassis port metrics shared by Juniper profiles.

    Counter OIDs are submitted as monotonic counts; the precomputed rate
    variants are submitted as gauges.
    """
    port_rows = (
        ['port_name:but driving but'],
        ['port_name:Jaded forward but oxen quaintly their their'],
        ['port_name:forward forward driving driving Jaded Jaded'],
    )
    for row_tags in port_rows:
        for count_metric in VIRTUAL_CHASSIS_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(count_metric),
                metric_type=aggregator.MONOTONIC_COUNT,
                tags=common_tags + row_tags,
                count=1,
            )
        for rate_metric in VIRTUAL_CHASSIS_RATES:
            aggregator.assert_metric(
                'snmp.{}'.format(rate_metric),
                metric_type=aggregator.GAUGE,
                tags=common_tags + row_tags,
                count=1,
            )
def _check_juniper_cos(aggregator, common_tags):
    """Assert COS (class of service) queue metrics shared by Juniper profiles."""
    queue_rows = (
        ['interface:acted oxen oxen forward quaintly kept zombies but oxen', 'queue_number:25'],
        ['interface:acted kept quaintly acted oxen kept', 'queue_number:50'],
        ['interface:their', 'queue_number:15'],
    )
    for row_tags in queue_rows:
        for count_metric in COS_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(count_metric),
                metric_type=aggregator.MONOTONIC_COUNT,
                tags=common_tags + row_tags,
                count=1,
            )
        for rate_metric in COS_RATES:
            aggregator.assert_metric(
                'snmp.{}'.format(rate_metric),
                metric_type=aggregator.GAUGE,
                tags=common_tags + row_tags,
                count=1,
            )
| 40.546934 | 120 | 0.668066 |
import logging
import pytest
from datadog_checks.base import ConfigurationError
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.snmp import SnmpCheck
from datadog_checks.snmp.utils import (
_get_profile_name,
_is_abstract_profile,
_iter_default_profile_file_paths,
get_profile_definition,
recursively_expand_base_profiles,
)
from . import common
from .metrics import (
ADAPTER_IF_COUNTS,
CCCA_ROUTER_GAUGES,
CIE_METRICS,
COS_COUNTS,
COS_RATES,
CPU_METRICS,
DCU_COUNTS,
DISK_GAUGES,
DRS_GAUGES,
FIREWALL_COUNTS,
FRU_METRICS,
IF_BANDWIDTH_USAGE,
IF_COUNTS,
IF_GAUGES,
IF_RATES,
IP_COUNTS,
IP_IF_COUNTS,
IPX_COUNTS,
LTM_GAUGES,
LTM_NODES_COUNTS,
LTM_NODES_GAUGES,
LTM_NODES_RATES,
LTM_POOL_COUNTS,
LTM_POOL_GAUGES,
LTM_POOL_MEMBER_COUNTS,
LTM_POOL_MEMBER_GAUGES,
LTM_POOL_MEMBER_RATES,
LTM_POOL_RATES,
LTM_VIRTUAL_SERVER_COUNTS,
LTM_VIRTUAL_SERVER_GAUGES,
LTM_VIRTUAL_SERVER_RATES,
MEMORY_METRICS,
PEER_GAUGES,
PEER_RATES,
PROBE_GAUGES,
SCU_COUNTS,
SYSTEM_STATUS_GAUGES,
TCP_COUNTS,
TCP_GAUGES,
UDP_COUNTS,
USER_FIREWALL,
VIRTUAL_CHASSIS_COUNTS,
VIRTUAL_CHASSIS_RATES,
VOLTAGE_GAUGES,
)
pytestmark = common.python_autodiscovery_only
def test_load_profiles(caplog):
    """Every bundled profile must load into the check config without errors or warnings.

    Fails naming the first profile whose definition is rejected, and asserts
    that no 'missing metric_tags section' warning is logged while loading.
    """
    instance = common.generate_instance_config([])
    check = SnmpCheck('snmp', {}, [instance])
    # BUG FIX: `caplog.at_level(...)` returns a context manager; the previous bare
    # call never entered it, so the WARNING capture level was never applied and
    # the log assertion below could silently miss records.
    with caplog.at_level(logging.WARNING):
        for name, profile in check.profiles.items():
            try:
                check._config.refresh_with_profile(profile)
            except ConfigurationError as e:
                pytest.fail("Profile `{}` is not configured correctly: {}".format(name, e))
            assert "table doesn't have a 'metric_tags' section" not in caplog.text
            caplog.clear()
def test_profile_hierarchy():
    """Validate structural rules of the profile inheritance hierarchy.

    * Abstract (mixin) profiles must not extend '_base.yaml' (except legacy
      compat profiles) and must not define a `sysobjectid`.
    * Concrete profiles must directly extend '_base.yaml' and must define a
      `sysobjectid`.
    All violations are collected and reported in one sorted failure message.
    """
    errors = []
    # Legacy profiles allowed to extend '_base.yaml' despite being abstract.
    compat_base_profiles = ['_base_cisco', '_base_cisco_voice']
    for path in _iter_default_profile_file_paths():
        name = _get_profile_name(path)
        definition = get_profile_definition({'definition_file': path})
        extends = definition.get('extends', [])
        sysobjectid = definition.get('sysobjectid')
        if _is_abstract_profile(name):
            if '_base.yaml' in extends and name not in compat_base_profiles:
                errors.append("'{}': mixin wrongly extends '_base.yaml'".format(name))
            if sysobjectid is not None:
                errors.append("'{}': mixin wrongly defines a `sysobjectid`".format(name))
        else:
            if '_base.yaml' not in extends:
                errors.append("'{}': concrete profile must directly extend '_base.yaml'".format(name))
            if sysobjectid is None:
                errors.append("'{}': concrete profile must define a `sysobjectid`".format(name))
    if errors:
        pytest.fail('\n'.join(sorted(errors)))
def run_profile_check(recording_name, profile_name=None):
    """Run one SNMP check against a simulated device recording.

    :param recording_name: community string selecting the simulator recording;
        also used as the profile name when `profile_name` is not given.
    :param profile_name: optional explicit profile to validate before the run.
    """
    instance = common.generate_instance_config([])
    instance['community_string'] = recording_name
    instance['enforce_mib_constraints'] = False
    check = SnmpCheck('snmp', {}, [instance])
    # First, see if recording name is a profile, then use profile as definition.
    if profile_name is not None:
        profile = check.profiles.get(profile_name)
    else:
        profile = check.profiles.get(recording_name)
    if profile:
        # Sanity-check the profile on a throwaway check instance so a broken
        # profile fails the test explicitly instead of during the run below.
        try:
            test_check = SnmpCheck('snmp', {}, [common.generate_instance_config([])])
            test_check._config.refresh_with_profile(profile)
        except ConfigurationError as e:
            pytest.fail("Profile `{}` is not configured correctly: {}".format(recording_name, e))
    check.check(instance)
@pytest.mark.unit
@pytest.mark.parametrize(
    'definition_file, equivalent_definition',
    [
        pytest.param('_base_cisco.yaml', {'extends': ['_base.yaml', '_cisco-generic.yaml']}, id='generic'),
        pytest.param(
            '_base_cisco_voice.yaml',
            {'extends': ['_base.yaml', '_cisco-generic.yaml', '_cisco-voice.yaml']},
            id='voice',
        ),
    ],
)
def test_compat_cisco_base_profiles(definition_file, equivalent_definition):
    # type: (str, dict) -> None
    """Legacy Cisco base profiles must expand to the same definition as their modern equivalents."""
    definition = get_profile_definition({'definition_file': definition_file})
    recursively_expand_base_profiles(definition)
    recursively_expand_base_profiles(equivalent_definition)
    assert definition == equivalent_definition
@pytest.mark.usefixtures("dd_environment")
def test_cisco_voice(aggregator):
    """Cisco ICM voice profile: host resources, CVP/CCM gauges, call volumes and dial-control counts."""
    run_profile_check('cisco_icm')
    tags = [
        'snmp_profile:cisco_icm',
        'snmp_host:test',
        'device_vendor:cisco',
    ] + common.CHECK_TAGS
    resources = ["hrSWRunPerfMem", "hrSWRunPerfCPU"]
    common.assert_common_metrics(aggregator, tags)
    for resource in resources:
        aggregator.assert_metric('snmp.{}'.format(resource), metric_type=aggregator.GAUGE, tags=tags)
    run_indices = [4, 7, 8, 9, 10, 18, 24, 29, 30]
    for index in run_indices:
        status_tags = tags + ['run_index:{}'.format(index)]
        aggregator.assert_metric('snmp.hrSWRunStatus', metric_type=aggregator.GAUGE, tags=status_tags)
    cvp_gauges = [
        "ccvpSipIntAvgLatency1",
        "ccvpSipIntAvgLatency2",
        "ccvpSipIntConnectsRcv",
        "ccvpSipIntNewCalls",
        "ccvpSipRtActiveCalls",
        "ccvpSipRtTotalCallLegs",
        "ccvpLicRtPortsInUse",
        "ccvpLicAggMaxPortsInUse",
    ]
    for cvp in cvp_gauges:
        aggregator.assert_metric('snmp.{}'.format(cvp), metric_type=aggregator.GAUGE, tags=tags)
    # CCM phone counters come in two submission types: rates and gauges.
    ccms_counts = ["ccmRejectedPhones", "ccmUnregisteredPhones"]
    ccms_gauges = ["ccmRegisteredGateways", "ccmRegisteredPhones"]
    for ccm in ccms_counts:
        aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.RATE, tags=tags)
    for ccm in ccms_gauges:
        aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.GAUGE, tags=tags)
    calls = [
        "cvCallVolPeerIncomingCalls",
        "cvCallVolPeerOutgoingCalls",
    ]
    peers = [4, 13, 14, 17, 18, 22, 25, 30, 31]
    for call in calls:
        for peer in peers:
            peer_tags = tags + ["peer_index:{}".format(peer)]
            aggregator.assert_metric('snmp.{}'.format(call), metric_type=aggregator.GAUGE, tags=peer_tags)
    calls = [
        "cvCallVolMediaIncomingCalls",
        "cvCallVolMediaOutgoingCalls",
    ]
    for call in calls:
        aggregator.assert_metric('snmp.{}'.format(call), metric_type=aggregator.GAUGE, tags=tags)
    dial_controls = [
        "dialCtlPeerStatsAcceptCalls",
        "dialCtlPeerStatsFailCalls",
        "dialCtlPeerStatsRefuseCalls",
        "dialCtlPeerStatsSuccessCalls",
    ]
    for ctl in dial_controls:
        aggregator.assert_metric(
            'snmp.{}'.format(ctl), metric_type=aggregator.MONOTONIC_COUNT, tags=["peer_index:7"] + tags
        )
    pim_tags = tags + ['pim_host:test', 'pim_name:name', 'pim_num:2']
    aggregator.assert_metric('snmp.{}'.format("cccaPimStatus"), metric_type=aggregator.GAUGE, tags=pim_tags)
    aggregator.assert_metric('snmp.{}'.format("sysUpTimeInstance"), metric_type=aggregator.GAUGE, tags=tags, count=1)
    instance_numbers = ['4446', '5179', '12093', '19363', '25033', '37738', '42562', '51845', '62906', '63361']
    for metric in CCCA_ROUTER_GAUGES:
        for instance_number in instance_numbers:
            instance_tags = tags + ['instance_number:{}'.format(instance_number)]
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=instance_tags)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_f5(aggregator):
    """F5 BIG-IP profile: system gauges/counts, per-CPU rates, interfaces, and LTM tables.

    LTM coverage spans virtual servers, nodes, pools and pool members, each
    keyed by its own tag set.
    """
    profile = 'f5-big-ip'
    run_profile_check(profile)
    gauges = [
        'sysStatMemoryTotal',
        'sysStatMemoryUsed',
        'sysGlobalTmmStatMemoryTotal',
        'sysGlobalTmmStatMemoryUsed',
        'sysGlobalHostOtherMemoryTotal',
        'sysGlobalHostOtherMemoryUsed',
        'sysGlobalHostSwapTotal',
        'sysGlobalHostSwapUsed',
        'sysTcpStatOpen',
        'sysTcpStatCloseWait',
        'sysTcpStatFinWait',
        'sysTcpStatTimeWait',
        'sysUdpStatOpen',
        'sysClientsslStatCurConns',
    ]
    counts = [
        'sysTcpStatAccepts',
        'sysTcpStatAcceptfails',
        'sysTcpStatConnects',
        'sysTcpStatConnfails',
        'sysUdpStatAccepts',
        'sysUdpStatAcceptfails',
        'sysUdpStatConnects',
        'sysUdpStatConnfails',
        'sysClientsslStatEncryptedBytesIn',
        'sysClientsslStatEncryptedBytesOut',
        'sysClientsslStatDecryptedBytesIn',
        'sysClientsslStatDecryptedBytesOut',
        'sysClientsslStatHandshakeFailures',
    ]
    cpu_rates = [
        'sysMultiHostCpuUser',
        'sysMultiHostCpuNice',
        'sysMultiHostCpuSystem',
        'sysMultiHostCpuIdle',
        'sysMultiHostCpuIrq',
        'sysMultiHostCpuSoftirq',
        'sysMultiHostCpuIowait',
    ]
    interfaces = [
        ('1.0', 'desc2'),
        ('mgmt', 'desc1'),
        ('/Common/internal', 'desc5'),
        ('/Common/http-tunnel', 'desc3'),
        ('/Common/socks-tunnel', 'desc4'),
    ]
    # Tunnels have no speed in the recording, so no bandwidth-usage metrics for them.
    interfaces_with_bandwidth_usage = {
        '1.0',
        'mgmt',
        '/Common/internal',
    }
    tags = [
        'snmp_profile:' + profile,
        'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal',
        'device_vendor:f5',
    ]
    tags += common.CHECK_TAGS
    common.assert_common_metrics(aggregator, tags)
    for metric in gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    for metric in counts:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
    for metric in cpu_rates:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=['cpu:0'] + tags, count=1)
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=['cpu:1'] + tags, count=1)
    for interface, desc in interfaces:
        interface_tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(desc)] + tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=interface_tags, count=1
            )
        for metric in IF_RATES:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=interface_tags, count=1
            )
        if interface in interfaces_with_bandwidth_usage:
            for metric in IF_BANDWIDTH_USAGE:
                aggregator.assert_metric(
                    'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=interface_tags, count=1
                )
        for metric in IF_GAUGES:
            aggregator.assert_metric(
                'snmp.{}'.format(metric),
                metric_type=aggregator.GAUGE,
                tags=interface_tags,
                count=1,
            )
    for version in ['ipv4', 'ipv6']:
        ip_tags = ['ipversion:{}'.format(version)] + tags
        for metric in IP_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=ip_tags, count=1
            )
    for metric in LTM_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    servers = ['server1', 'server2', 'server3']
    for server in servers:
        server_tags = tags + ['server:{}'.format(server)]
        for metric in LTM_VIRTUAL_SERVER_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=server_tags, count=1)
        for metric in LTM_VIRTUAL_SERVER_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=server_tags, count=1
            )
        for metric in LTM_VIRTUAL_SERVER_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=server_tags, count=1)
    nodes = ['node1', 'node2', 'node3']
    for node in nodes:
        node_tags = tags + ['node:{}'.format(node)]
        for metric in LTM_NODES_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=node_tags, count=1)
        for metric in LTM_NODES_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=node_tags, count=1
            )
        for metric in LTM_NODES_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=node_tags, count=1)
    pools = ['pool1', 'pool2']
    for pool in pools:
        pool_tags = tags + ['pool:{}'.format(pool)]
        for metric in LTM_POOL_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=pool_tags, count=1)
        for metric in LTM_POOL_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=pool_tags, count=1
            )
        for metric in LTM_POOL_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=pool_tags, count=1)
    pool_members = [('pool1', 'node1'), ('pool1', 'node2'), ('pool2', 'node3')]
    for pool, node in pool_members:
        pool_member_tags = tags + ['pool:{}'.format(pool), 'node:{}'.format(node)]
        for metric in LTM_POOL_MEMBER_GAUGES:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=pool_member_tags, count=1
            )
        for metric in LTM_POOL_MEMBER_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=pool_member_tags, count=1
            )
        for metric in LTM_POOL_MEMBER_RATES:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=pool_member_tags, count=1
            )
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_router(aggregator):
    """Generic router profile: interface, TCP/UDP and per-IP-version metrics."""
    profile = "generic-router"
    run_profile_check(profile)
    common_tags = common.CHECK_TAGS + ['snmp_profile:' + profile]
    common.assert_common_metrics(aggregator, common_tags)
    interfaces = [
        ('eth0', 'kept'),
        ('eth1', 'their forward oxen'),
    ]
    for interface, if_desc in interfaces:
        tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(if_desc)] + common_tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in IF_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
        for metric in IF_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        for metric in IF_BANDWIDTH_USAGE:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    # IP counters are reported per IP version, and the per-interface variants
    # additionally per interface index.
    for version in ['ipv4', 'ipv6']:
        tags = ['ipversion:{}'.format(version)] + common_tags
        for metric in IP_COUNTS + IPX_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in IP_IF_COUNTS:
            for interface in ['17', '21']:
                tags = ['ipversion:{}'.format(version), 'interface:{}'.format(interface)] + common_tags
                aggregator.assert_metric(
                    'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
                )
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_f5_router(aggregator):
    """The generic router profile must still work when applied to an F5 device recording."""
    # Use the generic profile against the f5 device
    instance = common.generate_instance_config([])
    instance['community_string'] = 'f5-big-ip'
    instance['enforce_mib_constraints'] = False
    init_config = {'profiles': {'router': {'definition_file': 'generic-router.yaml'}}}
    check = SnmpCheck('snmp', init_config, [instance])
    check.check(instance)
    interfaces = [
        ('1.0', 'desc2'),
        ('mgmt', 'desc1'),
        ('/Common/internal', 'desc5'),
        ('/Common/http-tunnel', 'desc3'),
        ('/Common/socks-tunnel', 'desc4'),
    ]
    # Tunnels have no speed in the recording, so no bandwidth-usage metrics for them.
    interfaces_with_bandwidth_usage = {
        '1.0',
        'mgmt',
        '/Common/internal',
    }
    common_tags = [
        'snmp_profile:router',
        'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal',
    ]
    common_tags.extend(common.CHECK_TAGS)
    common.assert_common_metrics(aggregator, common_tags)
    for interface, desc in interfaces:
        tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(desc)] + common_tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in IF_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
        for metric in IF_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        if interface in interfaces_with_bandwidth_usage:
            for metric in IF_BANDWIDTH_USAGE:
                aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    for version in ['ipv4', 'ipv6']:
        tags = ['ipversion:{}'.format(version)] + common_tags
        for metric in IP_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_3850(aggregator):
    """Cisco Catalyst 3850 profile: interfaces, IP/TCP/UDP, sensors, FRUs, CPU/memory,
    environment monitors, stack/switch state, and OSPF neighbor/interface metrics.
    """
    profile = "cisco-3850"
    run_profile_check(profile)
    # We're not covering all interfaces
    interfaces = ["Gi1/0/{}".format(i) for i in range(1, 48)]
    common_tags = common.CHECK_TAGS + [
        'snmp_host:Cat-3850-4th-Floor.companyname.local',
        'snmp_profile:' + profile,
        'device_vendor:cisco',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    aliases = {
        'Gi1/0/24': 'LWAP-example',
        'Gi1/0/33': 'switchboard console',
        'Gi1/0/38': 'Mitel Console',
        'Gi1/1/3': 'Link to Switch',
        'Gi2/0/13': 'AP01',
        'Gi2/0/14': 'AP02',
        'Gi2/0/15': 'AP03',
        'Gi2/0/16': 'AP04',
        'Gi2/0/17': 'AP05',
        'Gi2/0/18': 'AP06',
        'Gi2/1/4': 'Link to Switch',
    }
    for interface in interfaces:
        # Interfaces without a configured alias get an empty interface_alias tag value.
        alias = aliases.get(interface, '')
        tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(alias)] + common_tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in IF_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        for metric in IF_BANDWIDTH_USAGE:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
        for metric in IF_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    # Hoisted out of the loop: the tag list does not depend on the metric.
    ipv6_tags = common_tags + ['ipversion:ipv6']
    for metric in IP_COUNTS + IPX_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=ipv6_tags, count=1
        )
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    sensors = [1006, 1007, 1008, 2006, 2007, 2008]
    for sensor in sensors:
        tags = ['sensor_id:{}'.format(sensor), 'sensor_type:8'] + common_tags
        aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
    frus = [1001, 1010, 2001, 2010]
    for fru in frus:
        tags = ['fru:{}'.format(fru)] + common_tags
        for metric in FRU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    cpus = [1000, 2000]
    for cpu in cpus:
        tags = ['cpu:{}'.format(cpu)] + common_tags
        for metric in CPU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    for interface in interfaces:
        tags = ['interface:{}'.format(interface)] + common_tags
        for metric in CIE_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
    power_supplies = [
        (1, 'Switch 1 - Power Supply B, NotExist'),
        (1, 'Switch 2 - Power Supply B, NotExist'),
        (2, 'Switch 1 - Power Supply A, Normal'),
        (2, 'Switch 2 - Power Supply A, Normal'),
    ]
    for source, descr in power_supplies:
        env_tags = ['power_source:{}'.format(source), 'power_status_descr:{}'.format(descr)]
        aggregator.assert_metric(
            'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=env_tags + common_tags
        )
    aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
    for switch, mac_addr in [(1, '0x046c9d42b080'), (2, '0xdccec1430680')]:
        tags = ['entity_name:Switch {}'.format(switch), 'mac_addr:{}'.format(mac_addr)] + common_tags
        aggregator.assert_metric('snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=tags)
    frus = [1011, 1012, 1013, 2011, 2012, 2013]
    for fru in frus:
        # Fixed: the tag list was previously computed into an unused local and
        # then rebuilt inline in the assertion; build it once and use it.
        tags = ['fru:{}'.format(fru)] + common_tags
        aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
    for metric in MEMORY_METRICS:
        for pool in ['Processor', 'IOS Process stack']:
            tags = ['mem_pool_name:{}'.format(pool)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
    neighbor_metrics = [
        ('ospfNbrEvents', aggregator.RATE),
        ('ospfNbrState', aggregator.GAUGE),
        ('ospfNbrLsRetransQLen', aggregator.GAUGE),
    ]
    for metric, metric_type in neighbor_metrics:
        tags = ['neighbor_ip:192.29.116.26', 'neighbor_id:192.29.66.79'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
    lls_metrics = ['ospfIfRetransInterval', 'ospfIfState']
    for metric in lls_metrics:
        tags = ['ospf_ip_addr:192.29.116.25'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    for temp_index in [1006, 1007, 1008, 2006, 2007, 2008]:
        env_tag = ['temp_index:{}'.format(temp_index), 'temp_state:1']
        aggregator.assert_metric(
            'snmp.ciscoEnvMonTemperatureStatusValue', metric_type=aggregator.GAUGE, tags=env_tag + common_tags
        )
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_meraki_cloud_controller(aggregator):
    """Profile test for the Meraki cloud controller: device, wifi and wired interface metrics."""
    run_profile_check('meraki-cloud-controller')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:meraki-cloud-controller',
        'snmp_host:dashboard.meraki.com',
        'device_vendor:meraki',
    ]
    common.assert_common_metrics(aggregator, common_tags)

    # Per-device gauges carry the device identification tags.
    device_tags = ['device:Gymnasium', 'product:MR16-HW', 'network:L_NETWORK'] + common_tags
    for name in ('devStatus', 'devClientCount'):
        aggregator.assert_metric('snmp.{}'.format(name), metric_type=aggregator.GAUGE, tags=device_tags, count=1)

    # Wireless interface gauges are tagged with the wifi interface name and its index.
    wifi_tags = ['interface:wifi0', 'index:4'] + common_tags
    for name in ('devInterfaceSentPkts', 'devInterfaceRecvPkts', 'devInterfaceSentBytes', 'devInterfaceRecvBytes'):
        aggregator.assert_metric('snmp.{}'.format(name), metric_type=aggregator.GAUGE, tags=wifi_tags, count=1)

    # Standard IF-MIB metric families, all reported for the wired interface.
    eth_tags = ['interface:eth0'] + common_tags
    for family, family_type in (
        (IF_COUNTS, aggregator.MONOTONIC_COUNT),
        (IF_GAUGES, aggregator.GAUGE),
        (IF_RATES, aggregator.RATE),
        (IF_BANDWIDTH_USAGE, aggregator.RATE),
    ):
        for name in family:
            aggregator.assert_metric('snmp.{}'.format(name), metric_type=family_type, tags=eth_tags, count=1)

    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_idrac(aggregator):
    """Profile test for Dell iDRAC: adapters, chassis health, PSUs, disks and probe tables."""
    run_profile_check('idrac')
    common_tags = common.CHECK_TAGS + ['snmp_profile:idrac', 'device_vendor:dell']
    common.assert_common_metrics(aggregator, common_tags)

    # Network adapter counters, one row per adapter.
    for adapter in ('eth0', 'en1'):
        adapter_tags = ['adapter:{}'.format(adapter)] + common_tags
        for count_metric in ADAPTER_IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(count_metric), metric_type=aggregator.MONOTONIC_COUNT, tags=adapter_tags, count=1
            )
    # Global system status, one row per chassis.
    for chassis in ('26', '29'):
        chassis_tags = ['chassis_index:{}'.format(chassis)] + common_tags
        for gauge in SYSTEM_STATUS_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=chassis_tags, count=1)
    for supply in ('supply1', 'supply2'):
        supply_tags = ['supply_name:{}'.format(supply)] + common_tags
        aggregator.assert_metric(
            'snmp.enclosurePowerSupplyState', metric_type=aggregator.GAUGE, tags=supply_tags, count=1
        )
    for disk in ('disk1', 'disk2'):
        disk_tags = ['disk_name:{}'.format(disk)] + common_tags
        for gauge in DISK_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=disk_tags, count=1)
    for battery in ('battery1', 'battery2'):
        battery_tags = ['battery_name:{}'.format(battery)] + common_tags
        aggregator.assert_metric('snmp.batteryState', metric_type=aggregator.GAUGE, tags=battery_tags, count=1)
    for controller in ('controller1', 'controller2'):
        controller_tags = ['controller_name:{}'.format(controller)] + common_tags
        aggregator.assert_metric(
            'snmp.controllerRollUpStatus', metric_type=aggregator.GAUGE, tags=controller_tags, count=1
        )
    # PCI device and slot statuses, paired with their chassis index.
    for device, chassis in (('device1', '10'), ('device2', '20')):
        pci_tags = ['device_descr_name:{}'.format(device), 'chassis_index:{}'.format(chassis)] + common_tags
        aggregator.assert_metric('snmp.pCIDeviceStatus', metric_type=aggregator.GAUGE, tags=pci_tags, count=1)
    for slot, chassis in (('slot1', '19'), ('slot2', '21')):
        slot_tags = ['slot_name:{}'.format(slot), 'chassis_index:{}'.format(chassis)] + common_tags
        aggregator.assert_metric('snmp.systemSlotStatus', metric_type=aggregator.GAUGE, tags=slot_tags, count=1)
    for chassis, fqdd, mac in (('29', 'device2', '0x9e00e0291401'), ('3', 'device1', '0x9e00e0291401')):
        net_tags = [
            'chassis_index:{}'.format(chassis),
            'device_fqdd:{}'.format(fqdd),
            'mac_addr:{}'.format(mac),
        ] + common_tags
        aggregator.assert_metric('snmp.networkDeviceStatus', metric_type=aggregator.GAUGE, tags=net_tags, count=1)
    for chassis in ('3', '31'):
        bios_tags = ['chassis_index:{}'.format(chassis)] + common_tags
        aggregator.assert_metric('snmp.systemBIOSStatus', metric_type=aggregator.GAUGE, tags=bios_tags, count=1)
    # Amperage and voltage probe tables, keyed by (chassis_index, probe_type).
    for chassis, probe_type in (('9', '26'), ('18', '26')):
        probe_tags = ['chassis_index:{}'.format(chassis), 'probe_type:{}'.format(probe_type)] + common_tags
        for gauge in PROBE_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=probe_tags, count=1)
    for chassis, probe_type in (('12', '6'), ('22', '3')):
        voltage_tags = ['chassis_index:{}'.format(chassis), 'probe_type:{}'.format(probe_type)] + common_tags
        for gauge in VOLTAGE_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=voltage_tags, count=1)
    for chassis, device_type, device_index in (('29', '26', '4'), ('22', '4', '21')):
        memory_tags = [
            'chassis_index:{}'.format(chassis),
            'device_type:{}'.format(device_type),
            'device_index:{}'.format(device_index),
        ] + common_tags
        aggregator.assert_metric('snmp.memoryDeviceStatus', metric_type=aggregator.GAUGE, tags=memory_tags, count=1)
    for gauge in DRS_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_nexus(aggregator):
    """Profile test for Cisco Nexus switches.

    Verifies per-interface IF-MIB families, TCP/UDP stack metrics, sensor,
    FRU and CPU tables, and the environment-monitor (temperature, power
    supply, fan, stack) gauges.
    """
    profile = "cisco-nexus"
    run_profile_check(profile)
    interfaces = ["GigabitEthernet1/0/{}".format(i) for i in range(1, 9)]
    common_tags = common.CHECK_TAGS + [
        'snmp_host:Nexus-eu1.companyname.managed',
        'snmp_profile:' + profile,
        'device_vendor:cisco',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    for interface in interfaces:
        tags = ['interface:{}'.format(interface)] + common_tags
        aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
    # Generic interface metrics also carry an (empty) interface_alias tag.
    for interface in interfaces:
        tags = ['interface:{}'.format(interface), 'interface_alias:'] + common_tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in IF_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
        for metric in IF_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        for metric in IF_BANDWIDTH_USAGE:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    # NOTE(review): sensor id 12 appears twice; asserting the same row twice is
    # harmless but looks unintended — confirm against the fixture data.
    sensors = [1, 9, 11, 12, 12, 14, 17, 26, 29, 31]
    for sensor in sensors:
        tags = ['sensor_id:{}'.format(sensor), 'sensor_type:8'] + common_tags
        aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
    frus = [6, 7, 15, 16, 19, 27, 30, 31]
    for fru in frus:
        tags = ['fru:{}'.format(fru)] + common_tags
        for metric in FRU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    cpus = [3173, 6692, 11571, 19529, 30674, 38253, 52063, 54474, 55946, 63960]
    for cpu in cpus:
        tags = ['cpu:{}'.format(cpu)] + common_tags
        for metric in CPU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    for (index, state) in [(3, 3), (6, 6), (8, 6), (11, 6), (13, 3), (14, 6), (20, 6), (21, 4), (31, 5)]:
        aggregator.assert_metric(
            'snmp.ciscoEnvMonTemperatureStatusValue',
            metric_type=aggregator.GAUGE,
            tags=['temp_state:{}'.format(state), 'temp_index:{}'.format(index)] + common_tags,
        )
    power_supply_tags = ['power_source:1', 'power_status_descr:Jaded driving their their their'] + common_tags
    aggregator.assert_metric('snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=power_supply_tags)
    fan_indices = [4, 6, 7, 16, 21, 22, 25, 27]
    for index in fan_indices:
        tags = ['fan_status_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_metric(
        'snmp.cswStackPortOperStatus',
        metric_type=aggregator.GAUGE,
        tags=common_tags + ['interface:GigabitEthernet1/0/1'],
    )
    aggregator.assert_metric(
        'snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=['mac_addr:0xffffffffffff'] + common_tags
    )
    frus = [2, 7, 8, 21, 26, 27, 30, 31]
    for fru in frus:
        # Fix: reuse the computed tags instead of rebuilding the identical list
        # inline (the local was previously assigned but unused).
        tags = ['fru:{}'.format(fru)] + common_tags
        aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_dell_poweredge(aggregator):
    """Profile test for Dell PowerEdge servers.

    Covers OS memory gauges, power supply / temperature / processor / cache /
    memory device tables, network device statuses, plus the iDRAC tables that
    the PowerEdge profile shares with the idrac profile.
    """
    run_profile_check('dell-poweredge')
    # Gauge groups asserted below, grouped by the MIB table they come from.
    sys_mem_gauges = [
        'operatingSystemMemoryAvailablePhysicalSize',
        'operatingSystemMemoryTotalPageFileSize',
        'operatingSystemMemoryAvailablePageFileSize',
        'operatingSystemMemoryTotalVirtualSize',
        'operatingSystemMemoryAvailableVirtualSize',
    ]
    power_supply_gauges = [
        'powerSupplyStatus',
        'powerSupplyOutputWatts',
        'powerSupplyMaximumInputVoltage',
        'powerSupplyCurrentInputVoltage',
    ]
    temperature_probe_gauges = ['temperatureProbeStatus', 'temperatureProbeReading']
    processor_device_gauges = ['processorDeviceStatus', 'processorDeviceThreadCount']
    cache_device_gauges = ['cacheDeviceStatus', 'cacheDeviceMaximumSize', 'cacheDeviceCurrentSize']
    memory_device_gauges = ['memoryDeviceStatus', 'memoryDeviceFailureModes']
    # iDRAC gauges are asserted loosely (no tags/count) at the end of the test.
    idrac_gauges = (
        ['batteryState', 'controllerRollUpStatus', 'pCIDeviceStatus', 'systemSlotStatus', 'systemBIOSStatus']
        + VOLTAGE_GAUGES
        + PROBE_GAUGES
    )
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:dell-poweredge',
        'device_vendor:dell',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # at_least=1 (not count=1) is used for the table rows below: rows may be
    # reported more than once in the simulated walk.
    chassis_indexes = [29, 31]
    for chassis_index in chassis_indexes:
        tags = ['chassis_index:{}'.format(chassis_index)] + common_tags
        for metric in sys_mem_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    indexes = [5, 17]
    for index in indexes:
        tags = ['chassis_index:4', 'index:{}'.format(index)] + common_tags
        for metric in power_supply_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    indexes = [13]
    for index in indexes:
        tags = ['chassis_index:18', 'index:{}'.format(index)] + common_tags
        for metric in temperature_probe_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    indexes = [17, 28]
    for index in indexes:
        tags = ['chassis_index:5', 'index:{}'.format(index)] + common_tags
        for metric in processor_device_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    indexes = [15, 27]
    for index in indexes:
        tags = ['chassis_index:11', 'index:{}'.format(index)] + common_tags
        for metric in cache_device_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    serial_numbers = ['forward zombies acted Jaded', 'kept oxen their their oxen oxen']
    for serial_number in serial_numbers:
        tags = ['serial_number_name:{}'.format(serial_number), 'chassis_index:1'] + common_tags
        for metric in memory_device_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    ip_addresses = ['66.97.1.103', '62.148.76.32', '45.3.243.155']
    for ip_address in ip_addresses:
        tags = ['ip_address:{}'.format(ip_address)] + common_tags
        aggregator.assert_metric('snmp.networkDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, at_least=1)
    # The remaining assertions mirror the idrac profile tables (count=1).
    interfaces = ['eth0', 'en1']
    for interface in interfaces:
        tags = ['adapter:{}'.format(interface)] + common_tags
        for count in ADAPTER_IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(count), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    indexes = ['26', '29']
    for index in indexes:
        tags = ['chassis_index:{}'.format(index)] + common_tags
        for gauge in SYSTEM_STATUS_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    powers = ['supply1', 'supply2']
    for power in powers:
        tags = ['supply_name:{}'.format(power)] + common_tags
        aggregator.assert_metric('snmp.enclosurePowerSupplyState', metric_type=aggregator.GAUGE, tags=tags, count=1)
    disks = ['disk1', 'disk2']
    for disk in disks:
        tags = ['disk_name:{}'.format(disk)] + common_tags
        for gauge in DISK_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    for gauge in idrac_gauges:
        aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_hp_ilo4(aggregator):
    """Profile test for HP iLO4 management controllers.

    Checks scalar health/ASR/thermal status gauges, health counters, iLO
    controller gauges, temperature and battery tables, management NIC
    counters, physical adapter metrics, and physical drive tables.
    """
    profile = "hp-ilo4"
    run_profile_check(profile)
    # Scalar status gauges asserted once with only the common tags.
    status_gauges = [
        'cpqHeCritLogCondition',
        'cpqHeCorrMemLogStatus',
        'cpqHeCorrMemLogCondition',
        'cpqHeAsrStatus',
        'cpqHeAsrPost',
        'cpqHeAsrCondition',
        'cpqHeAsrNetworkAccessStatus',
        'cpqHeThermalCondition',
        'cpqHeThermalTempStatus',
        'cpqHeThermalSystemFanStatus',
        'cpqHeThermalCpuFanStatus',
        'cpqNicVtVirusActivity',
        'cpqSm2CntlrServerPowerState',
        'cpqSm2CntlrBatteryStatus',
        'cpqSm2CntlrRemoteSessionStatus',
        'cpqSm2CntlrInterfaceStatus',
    ]
    cpqhlth_counts = ['cpqHeAsrRebootCount', 'cpqHeCorrMemTotalErrs']
    cpqhlth_gauges = ['cpqHeSysUtilEisaBusMin', 'cpqHePowerMeterCurrReading', 'cpqHeSysUtilLifeTime']
    cpqsm2_gauges = [
        'cpqSm2CntlrBatteryPercentCharged',
        'cpqSm2CntlrSelfTestErrors',
        'cpqSm2EventTotalEntries',
    ]
    # Card location codes used below as nic_stats_location tag values.
    EMBEDDED = 2
    PCMCIA = 3
    card_locations = [EMBEDDED, PCMCIA]
    network_card_counts = [
        'cpqSm2NicXmitBytes',
        'cpqSm2NicXmitTotalPackets',
        'cpqSm2NicXmitDiscardPackets',
        'cpqSm2NicXmitErrorPackets',
        'cpqSm2NicXmitQueueLength',
        'cpqSm2NicRecvBytes',
        'cpqSm2NicRecvTotalPackets',
        'cpqSm2NicRecvDiscardPackets',
        'cpqSm2NicRecvErrorPackets',
        'cpqSm2NicRecvUnknownPackets',
    ]
    interfaces = ['eth0', 'en1']
    phys_adapter_counts = [
        'cpqNicIfPhysAdapterGoodTransmits',
        'cpqNicIfPhysAdapterGoodReceives',
        'cpqNicIfPhysAdapterBadTransmits',
        'cpqNicIfPhysAdapterBadReceives',
        'cpqNicIfPhysAdapterInOctets',
        'cpqNicIfPhysAdapterOutOctets',
    ]
    phys_adapter_gauges = ['cpqNicIfPhysAdapterSpeed', 'cpqNicIfPhysAdapterSpeedMbps']
    temperature_sensors = [1, 13, 28]
    batteries = [1, 3, 4, 5]
    common_tags = common.CHECK_TAGS + ['snmp_profile:' + profile, 'device_vendor:hp']
    common.assert_common_metrics(aggregator, common_tags)
    for metric in status_gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in cpqhlth_counts:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in cpqhlth_gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in cpqsm2_gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    # Temperature and battery tables, one row per index.
    for index in temperature_sensors:
        tags = ['temperature_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.cpqHeTemperatureCelsius', metric_type=aggregator.GAUGE, tags=tags, count=1)
        aggregator.assert_metric('snmp.cpqHeTemperatureCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
    for index in batteries:
        tags = ['battery_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.cpqHeSysBatteryCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
        aggregator.assert_metric('snmp.cpqHeSysBatteryStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Management NIC traffic counters, one row per card location.
    for location in card_locations:
        tags = ['nic_stats_location:{}'.format(location)] + common_tags
        for metric in network_card_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    # Physical adapter counters/gauges, one row per interface.
    for interface in interfaces:
        tags = ['interface:{}'.format(interface)] + common_tags
        for metric in phys_adapter_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in phys_adapter_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Physical drive table, keyed by (controller index, drive index).
    drive_counts = [
        "cpqDaPhyDrvUsedReallocs",
        "cpqDaPhyDrvRefHours",
        "cpqDaPhyDrvHardReadErrs",
        "cpqDaPhyDrvRecvReadErrs",
        "cpqDaPhyDrvHardWriteErrs",
        "cpqDaPhyDrvRecvWriteErrs",
        "cpqDaPhyDrvHSeekErrs",
        "cpqDaPhyDrvSeekErrs",
    ]
    drive_gauges = [
        "cpqDaPhyDrvStatus",
        "cpqDaPhyDrvFactReallocs",
        "cpqDaPhyDrvSpinupTime",
        "cpqDaPhyDrvSize",
        "cpqDaPhyDrvSmartStatus",
        "cpqDaPhyDrvCurrentTemperature",
    ]
    drive_idx = [(0, 2), (0, 28), (8, 31), (9, 24), (9, 28), (10, 17), (11, 4), (12, 20), (18, 22), (23, 2)]
    for drive_cntrl_idx, drive_index in drive_idx:
        tags = ['drive_cntrl_idx:{}'.format(drive_cntrl_idx), "drive_index:{}".format(drive_index)] + common_tags
        for metric in drive_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in drive_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_proliant(aggregator):
    """Profile test for HPE ProLiant servers.

    Checks TCP/UDP stack metrics, CPU and CPU-utilization tables, file system
    and memory module tables, physical drive tables, IF-MIB interface
    families, memory board conditions, adapter/power/controller/thermal
    gauges.
    """
    run_profile_check('hpe-proliant')
    common_tags = common.CHECK_TAGS + ['snmp_profile:hpe-proliant', 'device_vendor:hp']
    common.assert_common_metrics(aggregator, common_tags)
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    # Per-CPU hardware gauges, one row per cpu_index.
    cpu_gauges = [
        "cpqSeCpuSlot",
        "cpqSeCpuSpeed",
        "cpqSeCpuStatus",
        "cpqSeCpuExtSpeed",
        "cpqSeCpuCore",
        "cpqSeCPUCoreMaxThreads",
        "cpqSeCpuPrimary",
    ]
    cpu_indexes = [0, 4, 6, 8, 13, 15, 26, 27]
    for idx in cpu_indexes:
        tags = ['cpu_index:{}'.format(idx)] + common_tags
        for metric in cpu_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # CPU utilization over several time windows, one row per cpu_unit_index.
    cpu_util_gauges = ["cpqHoCpuUtilMin", "cpqHoCpuUtilFiveMin", "cpqHoCpuUtilThirtyMin", "cpqHoCpuUtilHour"]
    cpu_unit_idx = [4, 7, 13, 20, 22, 23, 29]
    for idx in cpu_unit_idx:
        tags = ['cpu_unit_index:{}'.format(idx)] + common_tags
        for metric in cpu_util_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # File system capacity/usage table, one row per file_sys_index.
    file_sys_gauges = [
        "cpqHoFileSysSpaceTotal",
        "cpqHoFileSysSpaceUsed",
        "cpqHoFileSysPercentSpaceUsed",
        "cpqHoFileSysAllocUnitsTotal",
        "cpqHoFileSysAllocUnitsUsed",
        "cpqHoFileSysStatus",
    ]
    file_sys_idx = [5, 8, 11, 15, 19, 21, 28, 30]
    for idx in file_sys_idx:
        tags = ['file_sys_index:{}'.format(idx)] + common_tags
        for metric in file_sys_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Memory module table, keyed by (board index, module index).
    memory_gauges = [
        "cpqSiMemModuleSize",
        "cpqSiMemModuleType",
        "cpqSiMemModuleSpeed",
        "cpqSiMemModuleTechnology",
        "cpqSiMemModuleECCStatus",
        "cpqSiMemModuleFrequency",
        "cpqSiMemModuleCellStatus",
    ]
    memory_idx = [(6, 16), (7, 17), (7, 30), (8, 20), (10, 4), (15, 27), (20, 14), (21, 14), (23, 0), (28, 20)]
    for board_idx, mem_module_index in memory_idx:
        tags = ['mem_board_index:{}'.format(board_idx), "mem_module_index:{}".format(mem_module_index)] + common_tags
        for metric in memory_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Physical drive table, keyed by (controller index, drive index).
    drive_counts = [
        "cpqDaPhyDrvUsedReallocs",
        "cpqDaPhyDrvRefHours",
        "cpqDaPhyDrvHardReadErrs",
        "cpqDaPhyDrvRecvReadErrs",
        "cpqDaPhyDrvHardWriteErrs",
        "cpqDaPhyDrvRecvWriteErrs",
        "cpqDaPhyDrvHSeekErrs",
        "cpqDaPhyDrvSeekErrs",
    ]
    drive_gauges = [
        "cpqDaPhyDrvStatus",
        "cpqDaPhyDrvFactReallocs",
        "cpqDaPhyDrvSpinupTime",
        "cpqDaPhyDrvSize",
        "cpqDaPhyDrvSmartStatus",
        "cpqDaPhyDrvCurrentTemperature",
    ]
    drive_idx = [(0, 2), (0, 28), (8, 31), (9, 24), (9, 28), (10, 17), (11, 4), (12, 20), (18, 22), (23, 2)]
    for drive_cntrl_idx, drive_index in drive_idx:
        tags = ['drive_cntrl_idx:{}'.format(drive_cntrl_idx), "drive_index:{}".format(drive_index)] + common_tags
        for metric in drive_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in drive_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Standard IF-MIB families, one row per (interface, alias) pair.
    interfaces = [
        ('eth0', 'quaintly zombies quaintly forward'),
        ('eth1', 'quaintly but quaintly quaintly'),
    ]
    for interface, desc in interfaces:
        if_tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(desc)] + common_tags
        for metric in IF_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
            )
        for metric in IF_GAUGES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
        for metric in IF_RATES:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
        for metric in IF_BANDWIDTH_USAGE:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
    mem_boards = ['11', '12']
    for board in mem_boards:
        tags = ['mem_board_index:{}'.format(board)] + common_tags
        aggregator.assert_metric('snmp.cpqHeResMem2ModuleCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
    adapter_gauges = ['cpqNicIfPhysAdapterStatus', 'cpqNicIfPhysAdapterState']
    for gauge in adapter_gauges:
        tags = ['adapter_name:adapter', 'adapter_mac_addr:mac'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    power_metrics = [
        'cpqHeFltTolPowerSupplyStatus',
        'cpqHeFltTolPowerSupplyCapacityUsed',
        'cpqHeFltTolPowerSupplyCapacityMaximum',
    ]
    for gauge in power_metrics:
        tags = ['chassis_num:30'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
    controller_index = ['controller_index:3'] + common_tags
    aggregator.assert_metric(
        'snmp.{}'.format("cpqDaCntlrCondition"), metric_type=aggregator.GAUGE, tags=controller_index, count=1
    )
    thermal_metrics = ['cpqHeThermalCondition', 'cpqHeSysUtilLifeTime', 'cpqHeFltTolPwrSupplyStatus']
    for metric in thermal_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_generic_host_resources(aggregator):
    """Run the generic host-resources profile and verify system, storage and CPU metrics."""
    instance = common.generate_instance_config([])
    instance['community_string'] = 'generic_host'
    instance['enforce_mib_constraints'] = False
    instance['profile'] = 'generic'
    init_config = {'profiles': {'generic': {'definition_file': '_generic-host-resources.yaml'}}}
    check = SnmpCheck('snmp', init_config, [instance])
    check.check(instance)
    common_tags = common.CHECK_TAGS + ['snmp_profile:generic']
    common.assert_common_metrics(aggregator, common_tags)
    # System-wide gauges: exactly one sample each, with the common tags.
    for name in ('hrSystemUptime', 'hrSystemNumUsers', 'hrSystemProcesses', 'hrSystemMaxProcesses'):
        aggregator.assert_metric('snmp.{}'.format(name), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    # Table-based metrics: two rows each in the simulated walk.
    for table_metric in (
        'snmp.hrStorageAllocationUnits',
        'snmp.hrStorageSize',
        'snmp.hrStorageUsed',
        'snmp.hrStorageAllocationFailures',
        'snmp.hrProcessorLoad',
    ):
        aggregator.assert_metric(table_metric, count=2)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_palo_alto(aggregator):
    """Profile test for Palo Alto firewalls: session, GlobalProtect, entity and FRU metrics."""
    profile = "palo-alto"
    run_profile_check(profile)
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:' + profile,
        'device_vendor:paloaltonetworks',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # Gauges expected exactly once with only the common tags.
    single_sample_gauges = [
        # Session table
        'panSessionUtilization',
        'panSessionMax',
        'panSessionActive',
        'panSessionActiveTcp',
        'panSessionActiveUdp',
        'panSessionActiveICMP',
        'panSessionActiveSslProxy',
        'panSessionSslProxyUtilization',
        # GlobalProtect gateway utilization
        'panGPGWUtilizationPct',
        'panGPGWUtilizationMaxTunnels',
        'panGPGWUtilizationActiveTunnels',
        # Entity power
        'panEntityTotalPowerAvail',
        'panEntityTotalPowerUsed',
    ]
    for name in single_sample_gauges:
        aggregator.assert_metric('snmp.{}'.format(name), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    # FRU entry gauges are asserted without a count constraint.
    for name in ('panEntryFRUModulePowerUsed', 'panEntryFRUModuleNumPorts'):
        aggregator.assert_metric('snmp.{}'.format(name), metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.panEntryFanTrayPowerUsed', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_asa_all(aggregator):
    """Run the base cisco-asa profile through the shared ASA assertions."""
    assert_cisco_asa(aggregator, "cisco-asa")
@pytest.mark.usefixtures("dd_environment")
def test_cisco_asa_5525(aggregator):
    """Run the ASA 5525 model-specific profile through the shared ASA assertions."""
    assert_cisco_asa(aggregator, "cisco-asa-5525")
def assert_cisco_asa(aggregator, profile):
    """Shared assertions for Cisco ASA profiles (base and model-specific variants).

    Verifies TCP/UDP stack metrics, IF-MIB interface families, FRU/CPU/sensor
    tables, firewall connection statistics, remote-access and IPsec counters,
    environment monitors (temperature, power supply, fan, stack), memory
    pools, hardware failover statuses, chassis uptimes, and IP SLA (RTT)
    gauges.
    """
    run_profile_check(profile)
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:' + profile,
        'snmp_host:kept',
        'device_vendor:cisco',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    for metric in TCP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    for metric in TCP_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in UDP_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    if_tags = ['interface:eth0'] + common_tags
    for metric in IF_COUNTS:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
        )
    for metric in IF_GAUGES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
    for metric in IF_RATES:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
    for metric in IF_BANDWIDTH_USAGE:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
    aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1)
    frus = [3, 4, 5, 7, 16, 17, 24, 25]
    for fru in frus:
        tags = ['fru:{}'.format(fru)] + common_tags
        for metric in FRU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    cpus = [7746]
    for cpu in cpus:
        tags = ['cpu:{}'.format(cpu)] + common_tags
        for metric in CPU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    sensor_tags = ['sensor_id:31', 'sensor_type:9'] + common_tags
    aggregator.assert_metric('snmp.entPhySensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
    stat_tags = [(20, 2), (5, 5)]
    for (svc, stat) in stat_tags:
        aggregator.assert_metric(
            'snmp.cfwConnectionStatValue',
            metric_type=aggregator.GAUGE,
            tags=['stat_type:{}'.format(stat), 'service_type:{}'.format(svc)] + common_tags,
        )
    # Remote-access session and IPsec global counters (no count constraint).
    aggregator.assert_metric('snmp.crasNumDeclinedSessions', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.crasNumSessions', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.crasNumUsers', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric(
        'snmp.crasNumSetupFailInsufResources', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags
    )
    aggregator.assert_metric('snmp.cipSecGlobalActiveTunnels', metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_metric('snmp.cipSecGlobalHcInOctets', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
    aggregator.assert_metric('snmp.cipSecGlobalHcOutOctets', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
    for (index, state) in [(3, 3), (6, 6), (8, 6), (11, 6), (13, 3), (14, 6), (20, 6), (21, 4), (31, 5)]:
        aggregator.assert_metric(
            'snmp.ciscoEnvMonTemperatureStatusValue',
            metric_type=aggregator.GAUGE,
            tags=['temp_state:{}'.format(state), 'temp_index:{}'.format(index)] + common_tags,
        )
    power_supply_tags = ['power_source:1', 'power_status_descr:Jaded driving their their their'] + common_tags
    aggregator.assert_metric('snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=power_supply_tags)
    fan_indices = [4, 6, 7, 16, 21, 22, 25, 27]
    for index in fan_indices:
        tags = ['fan_status_index:{}'.format(index)] + common_tags
        aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
    aggregator.assert_metric(
        'snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=['mac_addr:0xffffffffffff'] + common_tags
    )
    frus = [2, 7, 8, 21, 26, 27, 30, 31]
    for fru in frus:
        # Fix: reuse the computed tags instead of rebuilding the identical list
        # inline (the local was previously assigned but unused).
        tags = ['fru:{}'.format(fru)] + common_tags
        aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
    for metrics in MEMORY_METRICS:
        tags = ['mem_pool_name:test_pool'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(metrics), metric_type=aggregator.GAUGE, tags=tags)
    for conn in [1, 2, 5]:
        conn_tags = ['connection_type:{}'.format(conn)] + common_tags
        aggregator.assert_metric('snmp.cfwConnectionStatCount', metric_type=aggregator.RATE, tags=conn_tags)
    hardware_tags = [(3, 'Secondary unit'), (5, 'Primary unit'), (6, 'Failover LAN Interface')]
    for (htype, hdesc) in hardware_tags:
        aggregator.assert_metric(
            'snmp.cfwHardwareStatusValue',
            metric_type=aggregator.GAUGE,
            tags=['hardware_type:{}'.format(htype), 'hardware_desc:{}'.format(hdesc)] + common_tags,
        )
    for switch in [4684, 4850, 8851, 9997, 15228, 16580, 24389, 30813, 36264]:
        aggregator.assert_metric(
            'snmp.cvsChassisUpTime',
            metric_type=aggregator.GAUGE,
            tags=['chassis_switch_id:{}'.format(switch)] + common_tags,
        )
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    # IP SLA (RTT monitor) rows: iterate the parallel index/type/state lists
    # together with zip instead of indexing via range(len(...)).
    rtt_indexes = [1, 7, 10, 13, 15, 18, 20]
    rtt_types = [22, 21, 17, 6, 20, 8, 16]
    rtt_states = [3, 1, 6, 4, 6, 1, 6]
    rtt_gauges = ['rttMonLatestRttOperCompletionTime', 'rttMonLatestRttOperSense', 'rttMonCtrlOperTimeoutOccurred']
    for rtt_index, rtt_type, rtt_state in zip(rtt_indexes, rtt_types, rtt_states):
        tags = [
            "rtt_index:{}".format(rtt_index),
            "rtt_type:{}".format(rtt_type),
            "rtt_state:{}".format(rtt_state),
        ] + common_tags
        for rtt in rtt_gauges:
            aggregator.assert_metric('snmp.{}'.format(rtt), metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_csr(aggregator):
    """Profile test for the Cisco CSR 1000v: per-neighbor peer gauges and rates."""
    run_profile_check('cisco-csr1000v')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:cisco-csr1000v',
        'device_vendor:cisco',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    neighbor_tags = ['neighbor:244.12.239.177'] + common_tags
    for family, family_type in ((PEER_GAUGES, aggregator.GAUGE), (PEER_RATES, aggregator.RATE)):
        for name in family:
            aggregator.assert_metric('snmp.{}'.format(name), metric_type=family_type, tags=neighbor_tags)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
@pytest.mark.usefixtures("dd_environment")
def test_checkpoint_firewall(aggregator):
    """Check the checkpoint-firewall profile: CPU, memory, disk, appliance
    sensor and firewall metrics must be submitted with the expected tags.
    """
    run_profile_check('checkpoint-firewall')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:checkpoint-firewall',
        'device_vendor:checkpoint',
    ]
    common.assert_common_metrics(aggregator, common_tags)
    # Multi-processor CPU gauges, one series per simulated core id.
    cpu_metrics = [
        'multiProcUserTime',
        'multiProcSystemTime',
        'multiProcIdleTime',
        'multiProcUsage',
    ]
    cpu_cores = [7097, 13039, 13761, 28994, 29751, 33826, 40053, 48847, 61593, 65044]
    for core in cpu_cores:
        tags = ['cpu_core:{}'.format(core)] + common_tags
        for metric in cpu_metrics:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
    aggregator.assert_metric('snmp.procNum', metric_type=aggregator.GAUGE, tags=common_tags)
    # Memory gauges are device-wide (no tags beyond the common ones).
    mem_metrics = ['memTotalReal64', 'memActiveReal64', 'memFreeReal64', 'memTotalVirtual64', 'memActiveVirtual64']
    for metric in mem_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
    disk_metrics = [
        'multiDiskSize',
        'multiDiskUsed',
        'multiDiskFreeTotalBytes',
        'multiDiskFreeAvailableBytes',
        'multiDiskFreeTotalPercent',
        'multiDiskFreeAvailablePercent',
    ]
    # NOTE(review): 'temperture' is presumably the spelling used by the profile
    # definition itself — do not "fix" it here without checking the profile.
    appliance_metrics = [
        'fanSpeedSensorValue',
        'fanSpeedSensorStatus',
        'tempertureSensorValue',
        'tempertureSensorStatus',
    ]
    # Disk rows and sensor rows share the same index/name pairs in the fixture.
    common_indices = range(10)
    common_names = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']
    for idx in common_indices:
        name = common_names[idx]
        tags = ['disk_index:{}'.format(idx), 'disk_name:{}'.format(name)] + common_tags
        for metric in disk_metrics:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
        tags = ['sensor_index:{}'.format(idx), 'sensor_name:{}'.format(name)] + common_tags
        for metric in appliance_metrics:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
    # Packet verdict counters are cumulative, hence MONOTONIC_COUNT.
    fw_count_metrics = ['fwAccepted', 'fwDropped', 'fwRejected']
    for metric in fw_count_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
    fw_gauge_metrics = ['fwNumConn', 'fwPeakNumConn']
    for metric in fw_gauge_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_arista(aggregator):
    """Check that the arista profile submits queue-drop counters and sensor gauges."""
    run_profile_check('arista')
    common_tags = common.CHECK_TAGS + ['snmp_profile:arista', 'device_vendor:arista']
    common.assert_common_metrics(aggregator, common_tags)
    # Each dropped-packet counter is tagged with its interface and queue index.
    queue_drop_cases = [
        ('snmp.aristaEgressQueuePktsDropped', 13, 10),
        ('snmp.aristaEgressQueuePktsDropped', 28, 22),
        ('snmp.aristaIngressQueuePktsDropped', 7, 25),
        ('snmp.aristaIngressQueuePktsDropped', 8, 24),
    ]
    for metric_name, if_index, q_index in queue_drop_cases:
        aggregator.assert_metric(
            metric_name,
            metric_type=aggregator.MONOTONIC_COUNT,
            tags=common_tags + ['interface_index:{}'.format(if_index), 'queue_index:{}'.format(q_index)],
            count=1,
        )
    # Physical sensor value and operational status, per (id, type) pair.
    for sensor_id, sensor_type in [(1, 11), (7, 8)]:
        sensor_tags = ['sensor_id:{}'.format(sensor_id), 'sensor_type:{}'.format(sensor_type)] + common_tags
        for sensor_metric in ('snmp.entPhySensorValue', 'snmp.entPhySensorOperStatus'):
            aggregator.assert_metric(sensor_metric, metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_aruba(aggregator):
    """Check the aruba profile: hardware health (fans, PSUs, processors,
    memory) plus OSPF neighbor/interface metrics.
    """
    run_profile_check('aruba')
    common_tags = common.CHECK_TAGS + ['snmp_profile:aruba', 'device_vendor:aruba']
    common.assert_common_metrics(aggregator, common_tags)
    # Hardware health gauges, one series per component index.
    for fan in [18, 28]:
        fan_tags = common_tags + ['fan_index:{}'.format(fan)]
        aggregator.assert_metric('snmp.sysExtFanStatus', metric_type=aggregator.GAUGE, tags=fan_tags, count=1)
    for psu in [1, 17]:
        psu_tags = common_tags + ['powersupply_index:{}'.format(psu)]
        aggregator.assert_metric('snmp.sysExtPowerSupplyStatus', metric_type=aggregator.GAUGE, tags=psu_tags, count=1)
    for proc in [11, 26]:
        proc_tags = common_tags + ['processor_index:{}'.format(proc)]
        aggregator.assert_metric('snmp.sysExtProcessorLoad', metric_type=aggregator.GAUGE, tags=proc_tags, count=1)
    for mem in [3, 20]:
        mem_tags = common_tags + ['memory_index:{}'.format(mem)]
        aggregator.assert_metric('snmp.sysExtMemorySize', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
        aggregator.assert_metric('snmp.sysExtMemoryUsed', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
        aggregator.assert_metric('snmp.sysExtMemoryFree', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
    aggregator.assert_metric(
        'snmp.wlsxSysExtPacketLossPercent', metric_type=aggregator.GAUGE, tags=common_tags, count=1
    )
    # OSPF neighbor metrics: events are submitted as RATE, states/queues as GAUGE.
    neighbor_metrics = [
        ('ospfNbrEvents', aggregator.RATE),
        ('ospfNbrState', aggregator.GAUGE),
        ('ospfNbrLsRetransQLen', aggregator.GAUGE),
    ]
    for metric, metric_type in neighbor_metrics:
        tags = ['neighbor_ip:192.29.116.26', 'neighbor_id:192.29.66.79'] + common_tags
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
    virtual_neighbor_metrics = [
        ('ospfVirtNbrState', aggregator.GAUGE),
        ('ospfVirtNbrEvents', aggregator.RATE),
        ('ospfVirtNbrLsRetransQLen', aggregator.GAUGE),
    ]
    for metric, metric_type in virtual_neighbor_metrics:
        for ip, nbr in [('74.210.82.1', '194.154.66.112'), ('122.226.86.1', '184.201.101.140')]:
            tags = ['neighbor_ip:{}'.format(ip), 'neighbor_id:{}'.format(nbr)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
    # Per-interface OSPF gauges, keyed by interface IP and neighbor id.
    lls_metrics = ['ospfIfRetransInterval', 'ospfIfState', 'ospfIfLsaCount']
    for metric in lls_metrics:
        for ip, nbr in [('58.115.169.188', '192.29.66.79'), ('18.2.8.29', '118.246.193.247')]:
            tags = ['ospf_ip_addr:{}'.format(ip), 'neighbor_id:{}'.format(nbr)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Virtual-interface variants carry only the neighbor id tag.
    virtual_lls_metrics = ['ospfVirtIfRetransInterval', 'ospfVirtIfState', 'ospfVirtIfLsaCount']
    for metric in virtual_lls_metrics:
        for nbr in ['194.154.66.112', '184.201.101.140']:
            tags = ['neighbor_id:{}'.format(nbr)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_chatsworth(aggregator):
    """Check the chatsworth_pdu profile: legacy-PDU metrics plus the newer
    cpiPdu* metrics (locks, sensors, lines, branches, outlets).
    """
    profile = "chatsworth_pdu"
    run_profile_check(profile)
    # Identification tags read from the legacy PDU objects in the fixture.
    legacy_global_tags = [
        'legacy_pdu_macaddress:00:0E:D3:AA:CC:EE',
        'legacy_pdu_model:P10-1234-ABC',
        'legacy_pdu_name:legacy-name1',
        'legacy_pdu_version:1.2.3',
    ]
    common_tags = common.CHECK_TAGS + legacy_global_tags + ['snmp_profile:' + profile, 'device_vendor:chatsworth']
    common.assert_common_metrics(aggregator, common_tags)
    legacy_pdu_tags = common_tags
    # Build the full list of expected legacy gauge/count metric names by
    # enumerating lines (1-3), probes/phases (per branch 1-2) and outlets (1-24).
    legacy_pdu_gauge_metrics = [
        'snmp.pduRole',
        'snmp.outOfService',
    ]
    legacy_pdu_monotonic_count_metrics = []
    for line in range(1, 4):
        legacy_pdu_gauge_metrics.append('snmp.line{}curr'.format(line))
    for branch in range(1, 3):
        legacy_pdu_gauge_metrics.append('snmp.temperatureProbe{}'.format(branch))
        legacy_pdu_gauge_metrics.append('snmp.humidityProbe{}'.format(branch))
        for xyz in ['xy', 'yz', 'zx']:
            legacy_pdu_monotonic_count_metrics.append('snmp.energy{}{}s'.format(xyz, branch))
            legacy_pdu_gauge_metrics.append('snmp.voltage{}{}'.format(xyz, branch))
            legacy_pdu_gauge_metrics.append('snmp.power{}{}'.format(xyz, branch))
            legacy_pdu_gauge_metrics.append('snmp.powerFact{}{}'.format(xyz, branch))
            legacy_pdu_gauge_metrics.append('snmp.current{}{}'.format(xyz, branch))
    for branch in range(1, 25):
        legacy_pdu_monotonic_count_metrics.append('snmp.receptacleEnergyoutlet{}s'.format(branch))
        legacy_pdu_gauge_metrics.append('snmp.outlet{}Current'.format(branch))
    for metric in legacy_pdu_gauge_metrics:
        aggregator.assert_metric(metric, metric_type=aggregator.GAUGE, tags=legacy_pdu_tags, count=1)
    for metric in legacy_pdu_monotonic_count_metrics:
        aggregator.assert_metric(metric, metric_type=aggregator.MONOTONIC_COUNT, tags=legacy_pdu_tags, count=1)
    # Newer cpiPdu* metrics carry their own per-PDU identification tags.
    pdu_tags = common_tags + [
        'pdu_cabinetid:cab1',
        'pdu_ipaddress:42.2.210.224',
        'pdu_macaddress:0x00249b3503f6',
        'pdu_model:model1',
        'pdu_name:name1',
        'pdu_version:v1.1',
    ]
    aggregator.assert_metric('snmp.cpiPduNumberBranches', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduNumberOutlets', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduOutOfService', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduUpgrade', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduChainRole', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    aggregator.assert_metric('snmp.cpiPduTotalPower', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
    # Lock-related status gauges, per lock id.
    for lock in [1, 2]:
        lock_tags = common_tags + ['lock_id:{}'.format(lock)]
        aggregator.assert_metric('snmp.cpiPduEasStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduDoorStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduLockStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
    for (sensor_name, sensor_index) in [('sensor1', 4), ('sensor2', 6)]:
        sensor_tags = common_tags + [
            'sensor_index:{}'.format(sensor_index),
            'sensor_name:{}'.format(sensor_name),
            'sensor_type:1',
        ]
        aggregator.assert_metric('snmp.cpiPduSensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
    for line in [6, 18]:
        line_tags = common_tags + ['line_id:{}'.format(line)]
        aggregator.assert_metric('snmp.cpiPduLineCurrent', metric_type=aggregator.GAUGE, tags=line_tags, count=1)
    # Branch metrics for the PDU named "name1": full set of gauges plus energy.
    for branch in [1, 17]:
        branch_tags = common_tags + ['branch_id:{}'.format(branch), 'pdu_name:name1']
        aggregator.assert_metric('snmp.cpiPduBranchCurrent', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduBranchMaxCurrent', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduBranchVoltage', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduBranchPower', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric(
            'snmp.cpiPduBranchPowerFactor', metric_type=aggregator.GAUGE, tags=branch_tags, count=1
        )
        aggregator.assert_metric('snmp.cpiPduBranchStatus', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
        aggregator.assert_metric(
            'snmp.cpiPduBranchEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=branch_tags, count=1
        )
    # The PDU named "name2" only reports power factor and energy for branch 1.
    for branch in [1]:
        branch_tags = common_tags + ['branch_id:{}'.format(branch), 'pdu_name:name2']
        aggregator.assert_metric(
            'snmp.cpiPduBranchPowerFactor', metric_type=aggregator.GAUGE, tags=branch_tags, count=1
        )
        aggregator.assert_metric(
            'snmp.cpiPduBranchEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=branch_tags, count=1
        )
    for (outlet_id, outlet_branch, outlet_name) in [(7, 29, 'outlet1'), (16, 23, 'outlet2')]:
        outlet_tags = common_tags + [
            'outlet_id:{}'.format(outlet_id),
            'outlet_branchid:{}'.format(outlet_branch),
            'outlet_name:{}'.format(outlet_name),
        ]
        aggregator.assert_metric('snmp.cpiPduOutletCurrent', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduOutletVoltage', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduOutletPower', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
        aggregator.assert_metric('snmp.cpiPduOutletStatus', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
        aggregator.assert_metric(
            'snmp.cpiPduOutletEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=outlet_tags, count=1
        )
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_isilon(aggregator):
    """Check the isilon profile: quota, protocol, cluster/node, fan and disk
    metrics, all tagged with the cluster/node identifiers from the fixture.
    """
    run_profile_check('isilon')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:isilon',
        'cluster_name:testcluster1',
        'node_name:node1',
        'node_type:1',
        'device_vendor:dell',
    ]
    cluster_rates = [
        'clusterIfsInBytes',
        'clusterIfsOutBytes',
    ]
    node_rates = [
        'nodeIfsOutBytes',
        'nodeIfsInBytes',
    ]
    protocol_metrics = [
        'protocolOpsPerSecond',
        'latencyMin',
        'latencyMax',
        'latencyAverage',
    ]
    quota_metrics = ['quotaHardThreshold', 'quotaSoftThreshold', 'quotaUsage', 'quotaAdvisoryThreshold']
    # (quota id, quota type) pairs present in the simulated walk.
    quota_ids_types = [
        (422978632, 1),
        (153533730, 5),
        (3299369987, 4),
        (2149993012, 3),
        (1424325378, 1),
        (4245321451, 0),
        (2328145711, 1),
        (1198032230, 4),
        (1232918362, 1),
        (1383990869, 1),
    ]
    common.assert_common_metrics(aggregator, common_tags)
    for metric in quota_metrics:
        for qid, qtype in quota_ids_types:
            tags = ['quota_id:{}'.format(qid), 'quota_type:{}'.format(qtype)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    for metric in protocol_metrics:
        for num in range(1, 3):
            tags = ['protocol_name:testprotocol{}'.format(num)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_metric('snmp.clusterHealth', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in cluster_rates:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=common_tags, count=1)
    aggregator.assert_metric('snmp.nodeHealth', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in node_rates:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=common_tags, count=1)
    for fan in [4, 6, 10, 11, 14, 21, 22, 23, 25, 30]:
        tags = ['fan_name:testfan', 'fan_number:{}'.format(fan)] + common_tags
        aggregator.assert_metric('snmp.fanSpeed', metric_type=aggregator.GAUGE, tags=tags, count=1)
    # Disk metrics per (status, bay); no count=1 here, unlike the other asserts.
    for status, bay in [('SMARTFAIL', 1), ('HEALTHY', 2), ('DEAD', 3)]:
        tags = common_tags + ['disk_status:{}'.format(status), 'disk_bay:{}'.format((bay))]
        aggregator.assert_metric('snmp.diskSizeBytes', metric_type=aggregator.RATE, tags=tags)
    aggregator.assert_metric('snmp.ifsUsedBytes', metric_type=aggregator.RATE, tags=common_tags, count=1)
    aggregator.assert_metric('snmp.ifsTotalBytes', metric_type=aggregator.RATE, tags=common_tags, count=1)
@pytest.mark.usefixtures("dd_environment")
def test_apc_ups(aggregator):
    """Check the apc_ups profile: UPS battery/output gauges plus the
    per-flag upsBasicStateOutputState 0/1 gauges."""
    run_profile_check('apc_ups')
    profile_tags = [
        'snmp_profile:apc_ups',
        'model:APC Smart-UPS 600',
        'firmware_version:2.0.3-test',
        'serial_num:test_serial',
        'ups_name:testIdentName',
        'device_vendor:apc',
    ]
    tags = common.CHECK_TAGS + profile_tags
    common.assert_common_metrics(aggregator, tags)
    ups_gauges = [
        'upsAdvBatteryNumOfBadBattPacks',
        'upsAdvBatteryReplaceIndicator',
        'upsAdvBatteryRunTimeRemaining',
        'upsAdvBatteryTemperature',
        'upsAdvBatteryCapacity',
        'upsHighPrecInputFrequency',
        'upsHighPrecInputLineVoltage',
        'upsHighPrecOutputCurrent',
        'upsAdvInputLineFailCause',
        'upsAdvOutputLoad',
        'upsBasicBatteryTimeOnBattery',
        'upsAdvTestDiagnosticsResults',
    ]
    for gauge_name in ups_gauges:
        aggregator.assert_metric('snmp.{}'.format(gauge_name), metric_type=aggregator.GAUGE, tags=tags, count=1)
    aggregator.assert_metric(
        'snmp.upsOutletGroupStatusGroupState',
        metric_type=aggregator.GAUGE,
        tags=['outlet_group_name:test_outlet'] + tags,
    )
    # Each flag of upsBasicStateOutputState is reported as its own 0/1 gauge.
    state_flags = [
        ('AVRTrimActive', 1),
        ('BatteriesDischarged', 1),
        ('LowBatteryOnBattery', 1),
        ('NoBatteriesAttached', 1),
        ('OnLine', 0),
        ('ReplaceBattery', 1),
        ('On', 1),
    ]
    for flag, expected_value in state_flags:
        aggregator.assert_metric(
            'snmp.upsBasicStateOutputState.{}'.format(flag),
            expected_value,
            metric_type=aggregator.GAUGE,
            tags=tags,
            count=1,
        )
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_fortinet_fortigate(aggregator):
    """Check the fortinet-fortigate profile: system, per-processor, virtual
    domain and firewall-policy metrics.
    """
    run_profile_check('fortinet-fortigate')
    common_tags = common.CHECK_TAGS + [
        'snmp_profile:fortinet-fortigate',
        'device_vendor:fortinet',
    ]
    # Device-wide gauges, tagged only with the common tags.
    common_gauge_metrics = [
        'fgSysCpuUsage',
        'fgSysMemUsage',
        'fgSysMemCapacity',
        'fgSysLowMemUsage',
        'fgSysLowMemCapacity',
        'fgSysDiskUsage',
        'fgSysDiskCapacity',
        'fgSysSesCount',
        'fgSysSesRate1',
        'fgSysSes6Count',
        'fgSysSes6Rate1',
        'fgApHTTPConnections',
        'fgApHTTPMaxConnections',
        'fgVdNumber',
        'fgVdMaxVdoms',
    ]
    processor_gauge_metrics = [
        'fgProcessorUsage',
        'fgProcessorSysUsage',
    ]
    # Packet counters are submitted both as MONOTONIC_COUNT and as a '.rate' RATE.
    processor_count_metrics = [
        'fgProcessorPktRxCount',
        'fgProcessorPktTxCount',
        'fgProcessorPktDroppedCount',
    ]
    processor_tags = common_tags + ['processor_index:12']
    # Per-virtual-domain gauges.
    vd_metrics = [
        'fgVdEntOpMode',
        'fgVdEntHaState',
        'fgVdEntCpuUsage',
        'fgVdEntMemUsage',
        'fgVdEntSesCount',
        'fgVdEntSesRate',
    ]
    vd_tags = common_tags + ['virtualdomain_index:4', 'virtualdomain_name:their oxen quaintly']
    common.assert_common_metrics(aggregator, common_tags)
    for metric in common_gauge_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in processor_gauge_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=processor_tags, count=1)
    for metric in processor_count_metrics:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=processor_tags, count=1
        )
        aggregator.assert_metric(
            'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=processor_tags, count=1
        )
    for metric in vd_metrics:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=vd_tags, count=1)
    aggregator.assert_metric('snmp.fgIntfEntVdom', metric_type=aggregator.GAUGE, count=1)
    # IPv4 firewall policy counters, per policy index.
    firewall_tags = common_tags + ['policy_index:22']
    for metric in ['fgFwPolPktCount', 'fgFwPolByteCount']:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=firewall_tags, count=1
        )
        aggregator.assert_metric(
            'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=firewall_tags, count=1
        )
    # IPv6 firewall policy counters, per policy6 index.
    firewall6_tags = common_tags + ['policy6_index:29']
    for metric in ['fgFwPol6PktCount', 'fgFwPol6ByteCount']:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=firewall6_tags, count=1
        )
        aggregator.assert_metric(
            'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=firewall6_tags, count=1
        )
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_netapp(aggregator):
    """Check the netapp profile: cache/connection gauges, snapvault and
    snapmirror metrics, filesystem usage and interface counters.
    """
    run_profile_check('netapp')
    profile_tags = [
        'snmp_profile:netapp',
        'snmp_host:example-datacenter.company',
        'device_vendor:netapp',
    ]
    common_tags = common.CHECK_TAGS + profile_tags
    common.assert_common_metrics(aggregator, common_tags)
    gauges = [
        'cfInterconnectStatus',
        'miscCacheAge',
        'ncHttpActiveCliConns',
    ]
    counts = [
        'extcache64Hits',
    ]
    for metric in gauges:
        aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    for metric in counts:
        aggregator.assert_metric(
            'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
        )
    snapvault_counts = [
        'svTotalFailures',
    ]
    # (index, destination path, state) rows from the simulated snapvault table.
    snapvaults = [('5', '/vol/dir1', '5'), ('6', '/vol/dir3', '2'), ('18', '/vol/dir9', '4')]
    for metric in snapvault_counts:
        for index, destination, state in snapvaults:
            tags = [
                'index:{}'.format(index),
                'destination:{}'.format(destination),
                'state:{}'.format(state),
            ] + common_tags
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    # (index, state) rows from the snapmirror table.
    snapmirrors = [('6', '1'), ('9', '5'), ('29', '1')]
    snapmirror_gauges = [
        'snapmirrorLag',
    ]
    snapmirror_counts = [
        'snapmirrorTotalFailures',
    ]
    for index, state in snapmirrors:
        tags = ['index:{}'.format(index), 'state:{}'.format(state)] + common_tags
        for metric in snapmirror_gauges:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
        for metric in snapmirror_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
    filesystem_gauges = [
        'dfHighTotalKBytes',
        'dfHighAvailKBytes',
        'dfInodesUsed',
        'dfInodesFree',
    ]
    filesystem_indexes = [
        '1022',
        '1023',
        '1024',
        '1025',
        '1026',
        '1027',
        '1028',
        '1029',
        '1032',
        '1033',
    ]
    # One /vol/dirN filesystem per index, paired positionally via zip().
    filesystems = ['/vol/dir{}'.format(n) for n in range(1, len(filesystem_indexes) + 1)]
    for metric in filesystem_gauges:
        for index, filesystem in zip(filesystem_indexes, filesystems):
            tags = ['index:{}'.format(index), 'filesystem:{}'.format(filesystem)] + common_tags
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
    if_counts = [
        'ifHighInOctets',
    ]
    if_rates = [
        'ifHighInOctets.rate',
    ]
    # (ifIndex, ifDescr) rows from the interface table.
    interfaces = [
        ('6', 'netgear ifX300 v1'),
        ('7', 'junyper proto12 12.3'),
        ('23', 'malabar yz42 10.2020'),
    ]
    for index, descr in interfaces:
        tags = ['index:{}'.format(index), 'interface:{}'.format(descr)] + common_tags
        for metric in if_counts:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
            )
        for metric in if_rates:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_catalyst(aggregator):
    """Check sensor, interface and FRU metrics for the cisco-catalyst profile."""
    run_profile_check('cisco-catalyst')
    common_tags = common.CHECK_TAGS + [
        'snmp_host:catalyst-6000.example',
        'snmp_profile:cisco-catalyst',
        'device_vendor:cisco',
    ]
    # Entity sensor values, all of sensor type 10 in the fixture.
    for sensor_id in (5, 9):
        sensor_tags = ['sensor_id:{}'.format(sensor_id), 'sensor_type:10'] + common_tags
        aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
    # Per-interface CIE gauges on the Gi1/0/N ports present in the walk.
    for port in (6, 10, 12, 18, 22, 25, 27):
        if_tags = ['interface:Gi1/0/{}'.format(port)] + common_tags
        for metric in CIE_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
    # Per-FRU gauges.
    for fru in (1001, 1010, 2001, 2010):
        fru_tags = ['fru:{}'.format(fru)] + common_tags
        for metric in FRU_METRICS:
            aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=fru_tags, count=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_metric('snmp.devices_monitored', count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_juniper_ex(aggregator):
    """Check the juniper-ex profile: virtual-chassis, DCU, CoS and firewall metrics."""
    run_profile_check('juniper-ex')
    expected_tags = common.CHECK_TAGS + [
        'snmp_profile:juniper-ex',
        'device_vendor:juniper-networks',
    ]
    # Delegate each metric family to its shared helper.
    for check in (
        _check_juniper_virtual_chassis,
        _check_juniper_dcu,
        _check_juniper_cos,
        _check_juniper_firewall,
    ):
        check(aggregator, expected_tags)
    aggregator.assert_metric('snmp.devices_monitored', count=1)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_juniper_mx(aggregator):
    """Check the juniper-mx profile: virtual-chassis and firewall metrics."""
    run_profile_check('juniper-mx')
    expected_tags = common.CHECK_TAGS + [
        'snmp_profile:juniper-mx',
        'device_vendor:juniper-networks',
    ]
    # Delegate each metric family to its shared helper.
    for check in (_check_juniper_virtual_chassis, _check_juniper_firewall):
        check(aggregator, expected_tags)
    aggregator.assert_metric('snmp.devices_monitored', count=1)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_juniper_srx(aggregator):
    """Check the juniper-srx profile: user-firewall, DCU and SCU metrics."""
    run_profile_check('juniper-srx')
    expected_tags = common.CHECK_TAGS + [
        'snmp_profile:juniper-srx',
        'device_vendor:juniper-networks',
    ]
    # Delegate each metric family to its shared helper.
    for check in (_check_juniper_userfirewall, _check_juniper_dcu, _check_juniper_scu):
        check(aggregator, expected_tags)
    aggregator.assert_metric('snmp.devices_monitored', count=1)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
def _check_juniper_scu(aggregator, common_tags):
    """Assert the source-class-usage (SCU) counters with their expected tags."""
    interfaces = [
        'kept but',
        'quaintly driving oxen their zombies oxen acted acted',
        'but forward kept but their driving oxen quaintly acted',
    ]
    # Every fixture row uses address family 1; only the interface differs.
    for interface in interfaces:
        row_tags = ['address_family:1', 'interface:{}'.format(interface)]
        for metric in SCU_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags + row_tags, count=1
            )
def _check_juniper_userfirewall(aggregator, common_tags):
    """Assert the user-firewall LDAP counters for both fixture domains."""
    ldap_cases = [
        ('Mycroft Holmes', 'brother'),
        ('Jim Moriarty', 'enemy'),
    ]
    for domain, host in ldap_cases:
        case_tags = ['ldap_domain_name:{}'.format(domain), 'ldap_host:{}'.format(host)]
        for metric in USER_FIREWALL:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags + case_tags, count=1
            )
def _check_juniper_dcu(aggregator, common_tags):
    """Assert the destination-class-usage (DCU) counters for each tagged row."""
    # (address family, destination class name, interface) rows from the fixture.
    dcu_rows = [
        ('1', 'their', 'quaintly driving oxen their zombies oxen acted acted'),
        ('1', 'acted but forward acted zombies forward', 'but forward kept but their driving oxen quaintly acted'),
        ('2', 'oxen Jaded oxen Jaded forward kept quaintly', 'kept but'),
    ]
    for family, class_name, interface in dcu_rows:
        row_tags = [
            'address_family:{}'.format(family),
            'destination_class_name:{}'.format(class_name),
            'interface:{}'.format(interface),
        ]
        for metric in DCU_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags + row_tags, count=1
            )
def _check_juniper_firewall(aggregator, common_tags):
    """Assert the firewall filter counters for both expected counter rows."""
    # (counter name, firewall filter name); counter_type is 4 for every row.
    counter_rows = [
        ('Jaded oxen kept their driving but kept', 'their driving quaintly but Jaded oxen'),
        ('but but but their their their kept kept forward', 'driving kept acted Jaded zombies kept acted'),
    ]
    for counter_name, filter_name in counter_rows:
        row_tags = [
            'counter_name:{}'.format(counter_name),
            'counter_type:4',
            'firewall_filter_name:{}'.format(filter_name),
        ]
        for metric in FIREWALL_COUNTS:
            aggregator.assert_metric(
                'snmp.{}'.format(metric),
                metric_type=aggregator.MONOTONIC_COUNT,
                tags=common_tags + row_tags,
                count=1,
            )
def _check_juniper_virtual_chassis(aggregator, common_tags):
    """Assert virtual-chassis port counters (monotonic counts) and rate gauges."""
    port_names = [
        'but driving but',
        'Jaded forward but oxen quaintly their their',
        'forward forward driving driving Jaded Jaded',
    ]
    port_tag_sets = [['port_name:{}'.format(name)] for name in port_names]
    for metric in VIRTUAL_CHASSIS_COUNTS:
        for tags in port_tag_sets:
            aggregator.assert_metric(
                'snmp.{}'.format(metric),
                metric_type=aggregator.MONOTONIC_COUNT,
                tags=common_tags + tags,
                count=1,
            )
    for metric in VIRTUAL_CHASSIS_RATES:
        for tags in port_tag_sets:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags + tags, count=1
            )
def _check_juniper_cos(aggregator, common_tags):
    """Assert class-of-service queue counters (monotonic counts) and rate gauges."""
    # (interface, queue number) rows from the fixture.
    queue_rows = [
        ('acted oxen oxen forward quaintly kept zombies but oxen', '25'),
        ('acted kept quaintly acted oxen kept', '50'),
        ('their', '15'),
    ]
    cos_tag_sets = [
        ['interface:{}'.format(interface), 'queue_number:{}'.format(queue)] for interface, queue in queue_rows
    ]
    for metric in COS_COUNTS:
        for tags in cos_tag_sets:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags + tags, count=1
            )
    for metric in COS_RATES:
        for tags in cos_tag_sets:
            aggregator.assert_metric(
                'snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags + tags, count=1
            )
| true | true |
f724c766689f3310fcf1ff1658220beb097c094a | 30,199 | py | Python | Ingredient_Extractor/with_unknown_words_concideration/with_various_accuracies_on_first_layer/mean_values_1.py | ziko1305/Hidden-Markov-Based-Mathematical-Model | 0ad906e6c4f99ad91d4047aed78df49399447633 | [
"MIT"
] | null | null | null | Ingredient_Extractor/with_unknown_words_concideration/with_various_accuracies_on_first_layer/mean_values_1.py | ziko1305/Hidden-Markov-Based-Mathematical-Model | 0ad906e6c4f99ad91d4047aed78df49399447633 | [
"MIT"
] | null | null | null | Ingredient_Extractor/with_unknown_words_concideration/with_various_accuracies_on_first_layer/mean_values_1.py | ziko1305/Hidden-Markov-Based-Mathematical-Model | 0ad906e6c4f99ad91d4047aed78df49399447633 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 20 09:49:40 2020
@author: Mehdi
"""
import numpy as np
a1=np.nanmean([table_11.loc['A'].accuracy,table_12.loc['A'].accuracy,table_13.loc['A'].accuracy,table_14.loc['A'].accuracy,
table_15.loc['A'].accuracy,table_16.loc['A'].accuracy,table_17.loc['A'].accuracy,table_18.loc['A'].accuracy,
table_19.loc['A'].accuracy,table_110.loc['A'].accuracy])
a2=np.nanmean([table_11.loc['A'].f1_score,table_12.loc['A'].f1_score,table_13.loc['A'].f1_score,table_14.loc['A'].f1_score,
table_15.loc['A'].f1_score,table_16.loc['A'].f1_score,table_17.loc['A'].f1_score,table_18.loc['A'].f1_score,
table_19.loc['A'].f1_score,table_110.loc['A'].f1_score])
a3=np.nanmean([table_11.loc['A'][2],table_12.loc['A'][2],table_13.loc['A'][2],table_14.loc['A'][2],
table_15.loc['A'][2],table_16.loc['A'][2],table_17.loc['A'][2],table_18.loc['A'][2],
table_19.loc['A'][2],table_110.loc['A'][2]])
a4=np.nanmean([table_11.loc['A'][3],table_12.loc['A'][3],table_13.loc['A'][3],table_14.loc['A'][3],
table_15.loc['A'][3],table_16.loc['A'][3],table_17.loc['A'][3],table_18.loc['A'][3],
table_19.loc['A'][3],table_110.loc['A'][3]])
a5=np.nanmean([table_11.loc['A'][4],table_12.loc['A'][4],table_13.loc['A'][4],table_14.loc['A'][4],
table_15.loc['A'][4],table_16.loc['A'][4],table_17.loc['A'][4],table_18.loc['A'][4],
table_19.loc['A'][4],table_110.loc['A'][4]])
a6=np.nanmean([table_11.loc['A'][5],table_12.loc['A'][5],table_13.loc['A'][5],table_14.loc['A'][5],
table_15.loc['A'][5],table_16.loc['A'][5],table_17.loc['A'][5],table_18.loc['A'][5],
table_19.loc['A'][5],table_110.loc['A'][5]])
a7=np.nanmean([table_11.loc['B'].accuracy,table_12.loc['B'].accuracy,table_13.loc['B'].accuracy,table_14.loc['B'].accuracy,
table_15.loc['B'].accuracy,table_16.loc['B'].accuracy,table_17.loc['B'].accuracy,table_18.loc['B'].accuracy,
table_19.loc['B'].accuracy,table_110.loc['B'].accuracy])
a8=np.nanmean([table_11.loc['B'].f1_score,table_12.loc['B'].f1_score,table_13.loc['B'].f1_score,table_14.loc['B'].f1_score,
table_15.loc['B'].f1_score,table_16.loc['B'].f1_score,table_17.loc['B'].f1_score,table_18.loc['B'].f1_score,
table_19.loc['B'].f1_score,table_110.loc['B'].f1_score])
a9=np.nanmean([table_11.loc['B'][2],table_12.loc['B'][2],table_13.loc['B'][2],table_14.loc['B'][2],
table_15.loc['B'][2],table_16.loc['B'][2],table_17.loc['B'][2],table_18.loc['B'][2],
table_19.loc['B'][2],table_110.loc['B'][2]])
a10=np.nanmean([table_11.loc['B'][3],table_12.loc['B'][3],table_13.loc['B'][3],table_14.loc['B'][3],
table_15.loc['B'][3],table_16.loc['B'][3],table_17.loc['B'][3],table_18.loc['B'][3],
table_19.loc['B'][3],table_110.loc['B'][3]])
a11=np.nanmean([table_11.loc['B'][4],table_12.loc['B'][4],table_13.loc['B'][4],table_14.loc['B'][4],
table_15.loc['B'][4],table_16.loc['B'][4],table_17.loc['B'][4],table_18.loc['B'][4],
table_19.loc['B'][4],table_110.loc['B'][4]])
a12=np.nanmean([table_11.loc['B'][5],table_12.loc['B'][5],table_13.loc['B'][5],table_14.loc['B'][5],
table_15.loc['B'][5],table_16.loc['B'][5],table_17.loc['B'][5],table_18.loc['B'][5],
table_19.loc['B'][5],table_110.loc['B'][5]])
a13=np.nanmean([table_11.loc['C'].accuracy,table_12.loc['C'].accuracy,table_13.loc['C'].accuracy,table_14.loc['C'].accuracy,
table_15.loc['C'].accuracy,table_16.loc['C'].accuracy,table_17.loc['C'].accuracy,table_18.loc['C'].accuracy,
table_19.loc['C'].accuracy,table_110.loc['C'].accuracy])
a14=np.nanmean([table_11.loc['C'].f1_score,table_12.loc['C'].f1_score,table_13.loc['C'].f1_score,table_14.loc['C'].f1_score,
table_15.loc['C'].f1_score,table_16.loc['C'].f1_score,table_17.loc['C'].f1_score,table_18.loc['C'].f1_score,
table_19.loc['C'].f1_score,table_110.loc['C'].f1_score])
a15=np.nanmean([table_11.loc['C'][2],table_12.loc['C'][2],table_13.loc['C'][2],table_14.loc['C'][2],
table_15.loc['C'][2],table_16.loc['C'][2],table_17.loc['C'][2],table_18.loc['C'][2],
table_19.loc['C'][2],table_110.loc['C'][2]])
a16=np.nanmean([table_11.loc['C'][3],table_12.loc['C'][3],table_13.loc['C'][3],table_14.loc['C'][3],
table_15.loc['C'][3],table_16.loc['C'][3],table_17.loc['C'][3],table_18.loc['C'][3],
table_19.loc['C'][3],table_110.loc['C'][3]])
a17=np.nanmean([table_11.loc['C'][4],table_12.loc['C'][4],table_13.loc['C'][4],table_14.loc['C'][4],
table_15.loc['C'][4],table_16.loc['C'][4],table_17.loc['C'][4],table_18.loc['C'][4],
table_19.loc['C'][4],table_110.loc['C'][4]])
a18=np.nanmean([table_11.loc['C'][5],table_12.loc['C'][5],table_13.loc['C'][5],table_14.loc['C'][5],
table_15.loc['C'][5],table_16.loc['C'][5],table_17.loc['C'][5],table_18.loc['C'][5],
table_19.loc['C'][5],table_110.loc['C'][5]])
a19=np.nanmean([table_11.loc['D'].accuracy,table_12.loc['D'].accuracy,table_13.loc['D'].accuracy,table_14.loc['D'].accuracy,
table_15.loc['D'].accuracy,table_16.loc['D'].accuracy,table_17.loc['D'].accuracy,table_18.loc['D'].accuracy,
table_19.loc['D'].accuracy,table_110.loc['D'].accuracy])
a20=np.nanmean([table_11.loc['D'].f1_score,table_12.loc['D'].f1_score,table_13.loc['D'].f1_score,table_14.loc['D'].f1_score,
table_15.loc['D'].f1_score,table_16.loc['D'].f1_score,table_17.loc['D'].f1_score,table_18.loc['D'].f1_score,
table_19.loc['D'].f1_score,table_110.loc['D'].f1_score])
a21=np.nanmean([table_11.loc['D'][2],table_12.loc['D'][2],table_13.loc['D'][2],table_14.loc['D'][2],
table_15.loc['D'][2],table_16.loc['D'][2],table_17.loc['D'][2],table_18.loc['D'][2],
table_19.loc['D'][2],table_110.loc['D'][2]])
a22=np.nanmean([table_11.loc['D'][3],table_12.loc['D'][3],table_13.loc['D'][3],table_14.loc['D'][3],
table_15.loc['D'][3],table_16.loc['D'][3],table_17.loc['D'][3],table_18.loc['D'][3],
table_19.loc['D'][3],table_110.loc['D'][3]])
a23=np.nanmean([table_11.loc['D'][4],table_12.loc['D'][4],table_13.loc['D'][4],table_14.loc['D'][4],
table_15.loc['D'][4],table_16.loc['D'][4],table_17.loc['D'][4],table_18.loc['D'][4],
table_19.loc['D'][4],table_110.loc['D'][4]])
a24=np.nanmean([table_11.loc['D'][5],table_12.loc['D'][5],table_13.loc['D'][5],table_14.loc['D'][5],
table_15.loc['D'][5],table_16.loc['D'][5],table_17.loc['D'][5],table_18.loc['D'][5],
table_19.loc['D'][5],table_110.loc['D'][5]])
a25=np.nanmean([table_11.loc['E'].accuracy,table_12.loc['E'].accuracy,table_13.loc['E'].accuracy,table_14.loc['E'].accuracy,
table_15.loc['E'].accuracy,table_16.loc['E'].accuracy,table_17.loc['E'].accuracy,table_18.loc['E'].accuracy,
table_19.loc['E'].accuracy,table_110.loc['E'].accuracy])
a26=np.nanmean([table_11.loc['E'].f1_score,table_12.loc['E'].f1_score,table_13.loc['E'].f1_score,table_14.loc['E'].f1_score,
table_15.loc['E'].f1_score,table_16.loc['E'].f1_score,table_17.loc['E'].f1_score,table_18.loc['E'].f1_score,
table_19.loc['E'].f1_score,table_110.loc['E'].f1_score])
a27=np.nanmean([table_11.loc['E'][2],table_12.loc['E'][2],table_13.loc['E'][2],table_14.loc['E'][2],
table_15.loc['E'][2],table_16.loc['E'][2],table_17.loc['E'][2],table_18.loc['E'][2],
table_19.loc['E'][2],table_110.loc['E'][2]])
a28=np.nanmean([table_11.loc['E'][3],table_12.loc['E'][3],table_13.loc['E'][3],table_14.loc['E'][3],
table_15.loc['E'][3],table_16.loc['E'][3],table_17.loc['E'][3],table_18.loc['E'][3],
table_19.loc['E'][3],table_110.loc['E'][3]])
a29=np.nanmean([table_11.loc['E'][4],table_12.loc['E'][4],table_13.loc['E'][4],table_14.loc['E'][4],
table_15.loc['E'][4],table_16.loc['E'][4],table_17.loc['E'][4],table_18.loc['E'][4],
table_19.loc['E'][4],table_110.loc['E'][4]])
a30=np.nanmean([table_11.loc['E'][5],table_12.loc['E'][5],table_13.loc['E'][5],table_14.loc['E'][5],
table_15.loc['E'][5],table_16.loc['E'][5],table_17.loc['E'][5],table_18.loc['E'][5],
table_19.loc['E'][5],table_110.loc['E'][5]])
a31=np.nanmean([table_11.loc['F'].accuracy,table_12.loc['F'].accuracy,table_13.loc['F'].accuracy,table_14.loc['F'].accuracy,
table_15.loc['F'].accuracy,table_16.loc['F'].accuracy,table_17.loc['F'].accuracy,table_18.loc['F'].accuracy,
table_19.loc['F'].accuracy,table_110.loc['F'].accuracy])
a32=np.nanmean([table_11.loc['F'].f1_score,table_12.loc['F'].f1_score,table_13.loc['F'].f1_score,table_14.loc['F'].f1_score,
table_15.loc['F'].f1_score,table_16.loc['F'].f1_score,table_17.loc['F'].f1_score,table_18.loc['F'].f1_score,
table_19.loc['F'].f1_score,table_110.loc['F'].f1_score])
a33=np.nanmean([table_11.loc['F'][2],table_12.loc['F'][2],table_13.loc['F'][2],table_14.loc['F'][2],
table_15.loc['F'][2],table_16.loc['F'][2],table_17.loc['F'][2],table_18.loc['F'][2],
table_19.loc['F'][2],table_110.loc['F'][2]])
a34=np.nanmean([table_11.loc['F'][3],table_12.loc['F'][3],table_13.loc['F'][3],table_14.loc['F'][3],
table_15.loc['F'][3],table_16.loc['F'][3],table_17.loc['F'][3],table_18.loc['F'][3],
table_19.loc['F'][3],table_110.loc['F'][3]])
a35=np.nanmean([table_11.loc['F'][4],table_12.loc['F'][4],table_13.loc['F'][4],table_14.loc['F'][4],
table_15.loc['F'][4],table_16.loc['F'][4],table_17.loc['F'][4],table_18.loc['F'][4],
table_19.loc['F'][4],table_110.loc['F'][4]])
a36=np.nanmean([table_11.loc['F'][5],table_12.loc['F'][5],table_13.loc['F'][5],table_14.loc['F'][5],
table_15.loc['F'][5],table_16.loc['F'][5],table_17.loc['F'][5],table_18.loc['F'][5],
table_19.loc['F'][5],table_110.loc['F'][5]])
a37=np.nanmean([table_11.loc['G'].accuracy,table_12.loc['G'].accuracy,table_13.loc['G'].accuracy,table_14.loc['G'].accuracy,
table_15.loc['G'].accuracy,table_16.loc['G'].accuracy,table_17.loc['G'].accuracy,table_18.loc['G'].accuracy,
table_19.loc['G'].accuracy,table_110.loc['G'].accuracy])
a38=np.nanmean([table_11.loc['G'].f1_score,table_12.loc['G'].f1_score,table_13.loc['G'].f1_score,table_14.loc['G'].f1_score,
table_15.loc['G'].f1_score,table_16.loc['G'].f1_score,table_17.loc['G'].f1_score,table_18.loc['G'].f1_score,
table_19.loc['G'].f1_score,table_110.loc['G'].f1_score])
a39=np.nanmean([table_11.loc['G'][2],table_12.loc['G'][2],table_13.loc['G'][2],table_14.loc['G'][2],
table_15.loc['G'][2],table_16.loc['G'][2],table_17.loc['G'][2],table_18.loc['G'][2],
table_19.loc['G'][2],table_110.loc['G'][2]])
a40=np.nanmean([table_11.loc['G'][3],table_12.loc['G'][3],table_13.loc['G'][3],table_14.loc['G'][3],
table_15.loc['G'][3],table_16.loc['G'][3],table_17.loc['G'][3],table_18.loc['G'][3],
table_19.loc['G'][3],table_110.loc['G'][3]])
a41=np.nanmean([table_11.loc['G'][4],table_12.loc['G'][4],table_13.loc['G'][4],table_14.loc['G'][4],
table_15.loc['G'][4],table_16.loc['G'][4],table_17.loc['G'][4],table_18.loc['G'][4],
table_19.loc['G'][4],table_110.loc['G'][4]])
a42=np.nanmean([table_11.loc['G'][5],table_12.loc['G'][5],table_13.loc['G'][5],table_14.loc['G'][5],
table_15.loc['G'][5],table_16.loc['G'][5],table_17.loc['G'][5],table_18.loc['G'][5],
table_19.loc['G'][5],table_110.loc['G'][5]])
a43=np.nanmean([table_11.loc['H'].accuracy,table_12.loc['H'].accuracy,table_13.loc['H'].accuracy,table_14.loc['H'].accuracy,
table_15.loc['H'].accuracy,table_16.loc['H'].accuracy,table_17.loc['H'].accuracy,table_18.loc['H'].accuracy,
table_19.loc['H'].accuracy,table_110.loc['H'].accuracy])
a44=np.nanmean([table_11.loc['H'].f1_score,table_12.loc['H'].f1_score,table_13.loc['H'].f1_score,table_14.loc['H'].f1_score,
table_15.loc['H'].f1_score,table_16.loc['H'].f1_score,table_17.loc['H'].f1_score,table_18.loc['H'].f1_score,
table_19.loc['H'].f1_score,table_110.loc['H'].f1_score])
a45=np.nanmean([table_11.loc['H'][2],table_12.loc['H'][2],table_13.loc['H'][2],table_14.loc['H'][2],
table_15.loc['H'][2],table_16.loc['H'][2],table_17.loc['H'][2],table_18.loc['H'][2],
table_19.loc['H'][2],table_110.loc['H'][2]])
a46=np.nanmean([table_11.loc['H'][3],table_12.loc['H'][3],table_13.loc['H'][3],table_14.loc['H'][3],
table_15.loc['H'][3],table_16.loc['H'][3],table_17.loc['H'][3],table_18.loc['H'][3],
table_19.loc['H'][3],table_110.loc['H'][3]])
a47=np.nanmean([table_11.loc['H'][4],table_12.loc['H'][4],table_13.loc['H'][4],table_14.loc['H'][4],
table_15.loc['H'][4],table_16.loc['H'][4],table_17.loc['H'][4],table_18.loc['H'][4],
table_19.loc['H'][4],table_110.loc['H'][4]])
a48=np.nanmean([table_11.loc['H'][5],table_12.loc['H'][5],table_13.loc['H'][5],table_14.loc['H'][5],
table_15.loc['H'][5],table_16.loc['H'][5],table_17.loc['H'][5],table_18.loc['H'][5],
table_19.loc['H'][5],table_110.loc['H'][5]])
a49=np.nanmean([table_11.loc['I'].accuracy,table_12.loc['I'].accuracy,table_13.loc['I'].accuracy,table_14.loc['I'].accuracy,
table_15.loc['I'].accuracy,table_16.loc['I'].accuracy,table_17.loc['I'].accuracy,table_18.loc['I'].accuracy,
table_19.loc['I'].accuracy,table_110.loc['I'].accuracy])
a50=np.nanmean([table_11.loc['I'].f1_score,table_12.loc['I'].f1_score,table_13.loc['I'].f1_score,table_14.loc['I'].f1_score,
table_15.loc['I'].f1_score,table_16.loc['I'].f1_score,table_17.loc['I'].f1_score,table_18.loc['I'].f1_score,
table_19.loc['I'].f1_score,table_110.loc['I'].f1_score])
a51=np.nanmean([table_11.loc['I'][2],table_12.loc['I'][2],table_13.loc['I'][2],table_14.loc['I'][2],
table_15.loc['I'][2],table_16.loc['I'][2],table_17.loc['I'][2],table_18.loc['I'][2],
table_19.loc['I'][2],table_110.loc['I'][2]])
a52=np.nanmean([table_11.loc['I'][3],table_12.loc['I'][3],table_13.loc['I'][3],table_14.loc['I'][3],
table_15.loc['I'][3],table_16.loc['I'][3],table_17.loc['I'][3],table_18.loc['I'][3],
table_19.loc['I'][3],table_110.loc['I'][3]])
a53=np.nanmean([table_11.loc['I'][4],table_12.loc['I'][4],table_13.loc['I'][4],table_14.loc['I'][4],
table_15.loc['I'][4],table_16.loc['I'][4],table_17.loc['I'][4],table_18.loc['I'][4],
table_19.loc['I'][4],table_110.loc['I'][4]])
a54=np.nanmean([table_11.loc['I'][5],table_12.loc['I'][5],table_13.loc['I'][5],table_14.loc['I'][5],
table_15.loc['I'][5],table_16.loc['I'][5],table_17.loc['I'][5],table_18.loc['I'][5],
table_19.loc['I'][5],table_110.loc['I'][5]])
a55=np.nanmean([table_11.loc['J'].accuracy,table_12.loc['J'].accuracy,table_13.loc['J'].accuracy,table_14.loc['J'].accuracy,
table_15.loc['J'].accuracy,table_16.loc['J'].accuracy,table_17.loc['J'].accuracy,table_18.loc['J'].accuracy,
table_19.loc['J'].accuracy,table_110.loc['J'].accuracy])
a56=np.nanmean([table_11.loc['J'].f1_score,table_12.loc['J'].f1_score,table_13.loc['J'].f1_score,table_14.loc['J'].f1_score,
table_15.loc['J'].f1_score,table_16.loc['J'].f1_score,table_17.loc['J'].f1_score,table_18.loc['J'].f1_score,
table_19.loc['J'].f1_score,table_110.loc['J'].f1_score])
a57=np.nanmean([table_11.loc['J'][2],table_12.loc['J'][2],table_13.loc['J'][2],table_14.loc['J'][2],
table_15.loc['J'][2],table_16.loc['J'][2],table_17.loc['J'][2],table_18.loc['J'][2],
table_19.loc['J'][2],table_110.loc['J'][2]])
a58=np.nanmean([table_11.loc['J'][3],table_12.loc['J'][3],table_13.loc['J'][3],table_14.loc['J'][3],
table_15.loc['J'][3],table_16.loc['J'][3],table_17.loc['J'][3],table_18.loc['J'][3],
table_19.loc['J'][3],table_110.loc['J'][3]])
a59=np.nanmean([table_11.loc['J'][4],table_12.loc['J'][4],table_13.loc['J'][4],table_14.loc['J'][4],
table_15.loc['J'][4],table_16.loc['J'][4],table_17.loc['J'][4],table_18.loc['J'][4],
table_19.loc['J'][4],table_110.loc['J'][4]])
a60=np.nanmean([table_11.loc['J'][5],table_12.loc['J'][5],table_13.loc['J'][5],table_14.loc['J'][5],
table_15.loc['J'][5],table_16.loc['J'][5],table_17.loc['J'][5],table_18.loc['J'][5],
table_19.loc['J'][5],table_110.loc['J'][5]])
a61=np.nanmean([table_11.loc['K'].accuracy,table_12.loc['K'].accuracy,table_13.loc['K'].accuracy,table_14.loc['K'].accuracy,
table_15.loc['K'].accuracy,table_16.loc['K'].accuracy,table_17.loc['K'].accuracy,table_18.loc['K'].accuracy,
table_19.loc['K'].accuracy,table_110.loc['K'].accuracy])
a62=np.nanmean([table_11.loc['K'].f1_score,table_12.loc['K'].f1_score,table_13.loc['K'].f1_score,table_14.loc['K'].f1_score,
table_15.loc['K'].f1_score,table_16.loc['K'].f1_score,table_17.loc['K'].f1_score,table_18.loc['K'].f1_score,
table_19.loc['K'].f1_score,table_110.loc['K'].f1_score])
a63=np.nanmean([table_11.loc['K'][2],table_12.loc['K'][2],table_13.loc['K'][2],table_14.loc['K'][2],
table_15.loc['K'][2],table_16.loc['K'][2],table_17.loc['K'][2],table_18.loc['K'][2],
table_19.loc['K'][2],table_110.loc['K'][2]])
a64=np.nanmean([table_11.loc['K'][3],table_12.loc['K'][3],table_13.loc['K'][3],table_14.loc['K'][3],
table_15.loc['K'][3],table_16.loc['K'][3],table_17.loc['K'][3],table_18.loc['K'][3],
table_19.loc['K'][3],table_110.loc['K'][3]])
a65=np.nanmean([table_11.loc['K'][4],table_12.loc['K'][4],table_13.loc['K'][4],table_14.loc['K'][4],
table_15.loc['K'][4],table_16.loc['K'][4],table_17.loc['K'][4],table_18.loc['K'][4],
table_19.loc['K'][4],table_110.loc['K'][4]])
a66=np.nanmean([table_11.loc['K'][5],table_12.loc['K'][5],table_13.loc['K'][5],table_14.loc['K'][5],
table_15.loc['K'][5],table_16.loc['K'][5],table_17.loc['K'][5],table_18.loc['K'][5],
table_19.loc['K'][5],table_110.loc['K'][5]])
a67=np.nanmean([table_11.loc['L'].accuracy,table_12.loc['L'].accuracy,table_13.loc['L'].accuracy,table_14.loc['L'].accuracy,
table_15.loc['L'].accuracy,table_16.loc['L'].accuracy,table_17.loc['L'].accuracy,table_18.loc['L'].accuracy,
table_19.loc['L'].accuracy,table_110.loc['L'].accuracy])
a68=np.nanmean([table_11.loc['L'].f1_score,table_12.loc['L'].f1_score,table_13.loc['L'].f1_score,table_14.loc['L'].f1_score,
table_15.loc['L'].f1_score,table_16.loc['L'].f1_score,table_17.loc['L'].f1_score,table_18.loc['L'].f1_score,
table_19.loc['L'].f1_score,table_110.loc['L'].f1_score])
a69=np.nanmean([table_11.loc['L'][2],table_12.loc['L'][2],table_13.loc['L'][2],table_14.loc['L'][2],
table_15.loc['L'][2],table_16.loc['L'][2],table_17.loc['L'][2],table_18.loc['L'][2],
table_19.loc['L'][2],table_110.loc['L'][2]])
a70=np.nanmean([table_11.loc['L'][3],table_12.loc['L'][3],table_13.loc['L'][3],table_14.loc['L'][3],
table_15.loc['L'][3],table_16.loc['L'][3],table_17.loc['L'][3],table_18.loc['L'][3],
table_19.loc['L'][3],table_110.loc['L'][3]])
a71=np.nanmean([table_11.loc['L'][4],table_12.loc['L'][4],table_13.loc['L'][4],table_14.loc['L'][4],
table_15.loc['L'][4],table_16.loc['L'][4],table_17.loc['L'][4],table_18.loc['L'][4],
table_19.loc['L'][4],table_110.loc['L'][4]])
a72=np.nanmean([table_11.loc['L'][5],table_12.loc['L'][5],table_13.loc['L'][5],table_14.loc['L'][5],
table_15.loc['L'][5],table_16.loc['L'][5],table_17.loc['L'][5],table_18.loc['L'][5],
table_19.loc['L'][5],table_110.loc['L'][5]])
a73=np.nanmean([table_11.loc['M'].accuracy,table_12.loc['M'].accuracy,table_13.loc['M'].accuracy,table_14.loc['M'].accuracy,
table_15.loc['M'].accuracy,table_16.loc['M'].accuracy,table_17.loc['M'].accuracy,table_18.loc['M'].accuracy,
table_19.loc['M'].accuracy,table_110.loc['M'].accuracy])
a74=np.nanmean([table_11.loc['M'].f1_score,table_12.loc['M'].f1_score,table_13.loc['M'].f1_score,table_14.loc['M'].f1_score,
table_15.loc['M'].f1_score,table_16.loc['M'].f1_score,table_17.loc['M'].f1_score,table_18.loc['M'].f1_score,
table_19.loc['M'].f1_score,table_110.loc['M'].f1_score])
a75=np.nanmean([table_11.loc['M'][2],table_12.loc['M'][2],table_13.loc['M'][2],table_14.loc['M'][2],
table_15.loc['M'][2],table_16.loc['M'][2],table_17.loc['M'][2],table_18.loc['M'][2],
table_19.loc['M'][2],table_110.loc['M'][2]])
a76=np.nanmean([table_11.loc['M'][3],table_12.loc['M'][3],table_13.loc['M'][3],table_14.loc['M'][3],
table_15.loc['M'][3],table_16.loc['M'][3],table_17.loc['M'][3],table_18.loc['M'][3],
table_19.loc['M'][3],table_110.loc['M'][3]])
a77=np.nanmean([table_11.loc['M'][4],table_12.loc['M'][4],table_13.loc['M'][4],table_14.loc['M'][4],
table_15.loc['M'][4],table_16.loc['M'][4],table_17.loc['M'][4],table_18.loc['M'][4],
table_19.loc['M'][4],table_110.loc['M'][4]])
a78=np.nanmean([table_11.loc['M'][5],table_12.loc['M'][5],table_13.loc['M'][5],table_14.loc['M'][5],
table_15.loc['M'][5],table_16.loc['M'][5],table_17.loc['M'][5],table_18.loc['M'][5],
table_19.loc['M'][5],table_110.loc['M'][5]])
a79=np.nanmean([table_11.loc['.'].accuracy,table_12.loc['.'].accuracy,table_13.loc['.'].accuracy,table_14.loc['.'].accuracy,
table_15.loc['.'].accuracy,table_16.loc['.'].accuracy,table_17.loc['.'].accuracy,table_18.loc['.'].accuracy,
table_19.loc['.'].accuracy,table_110.loc['.'].accuracy])
a80=np.nanmean([table_11.loc['.'].f1_score,table_12.loc['.'].f1_score,table_13.loc['.'].f1_score,table_14.loc['.'].f1_score,
table_15.loc['.'].f1_score,table_16.loc['.'].f1_score,table_17.loc['.'].f1_score,table_18.loc['.'].f1_score,
table_19.loc['.'].f1_score,table_110.loc['.'].f1_score])
a81=np.nanmean([table_11.loc['.'][2],table_12.loc['.'][2],table_13.loc['.'][2],table_14.loc['.'][2],
table_15.loc['.'][2],table_16.loc['.'][2],table_17.loc['.'][2],table_18.loc['.'][2],
table_19.loc['.'][2],table_110.loc['.'][2]])
a82=np.nanmean([table_11.loc['.'][3],table_12.loc['.'][3],table_13.loc['.'][3],table_14.loc['.'][3],
table_15.loc['.'][3],table_16.loc['.'][3],table_17.loc['.'][3],table_18.loc['.'][3],
table_19.loc['.'][3],table_110.loc['.'][3]])
a83=np.nanmean([table_11.loc['.'][4],table_12.loc['.'][4],table_13.loc['.'][4],table_14.loc['.'][4],
table_15.loc['.'][4],table_16.loc['.'][4],table_17.loc['.'][4],table_18.loc['.'][4],
table_19.loc['.'][4],table_110.loc['.'][4]])
a84=np.nanmean([table_11.loc['.'][5],table_12.loc['.'][5],table_13.loc['.'][5],table_14.loc['.'][5],
table_15.loc['.'][5],table_16.loc['.'][5],table_17.loc['.'][5],table_18.loc['.'][5],
table_19.loc['.'][5],table_110.loc['.'][5]])
A=[[a1,a2,a3,round(a4),a5,round(a6)],[a7,a8,a9,round(a10),a11,round(a12)],[a13,a14,a15,round(a16),a17,round(a18)],
[a19,a20,a21,round(a22),a23,round(a24)]
,[a25,a26,a27,round(a28),a29,round(a30)],[a31,a32,a33,round(a34),a35,round(a36)],
[a37,a38,a39,round(a40),a41,round(a42)],[a43,a44,a45,round(a46),a47,round(a48)],
[a49,a50,a51,round(a52),a53,round(a54)],[a55,a56,a57,round(a58),a59,round(a60)],
[a61,a62,a63,round(a64),a65,round(a66)],[a67,a68,a69,round(a70),a71,round(a72)],
[a73,a74,a75,round(a76),a77,round(a78)],[a79,a80,a81,round(a82),a83,round(a84)]]
vv1=np.mean([v1[0],v2[0],v3[0],v4[0],v5[0],v6[0],v7[0],v8[0],v9[0],v10[0]])
vv2=np.mean([v1[1],v2[1],v3[1],v4[1],v5[1],v6[1],v7[1],v8[1],v9[1],v10[1]])
vv3=np.mean([v1[2],v2[2],v3[2],v4[2],v5[2],v6[2],v7[2],v8[2],v9[2],v10[2]])
vv4=np.mean([v1[3],v2[3],v3[3],v4[3],v5[3],v6[3],v7[3],v8[3],v9[3],v10[3]])
vv5=np.mean([v1[4],v2[4],v3[4],v4[4],v5[4],v6[4],v7[4],v8[4],v9[4],v10[4]])
vv6=np.mean([v1[5],v2[5],v3[5],v4[5],v5[5],v6[5],v7[5],v8[5],v9[5],v10[5]])
table_111= pd.DataFrame(A,columns=['accuracy', 'f1_score', 'accuracy for unknown words',
'number of unknown words','accuracy for known words','number of known words']
,index=['A','B','C','D','E','F','G','H','I','J','K','L','M','.'])
#table_10= pd.DataFrame(A,
#columns=['accuracy', 'f1_score', 'accuracy for unknown words',
# 'number of unknown words','accuracy for known words','number of known words']
#,index=[list(tag2idx.keys())[0], list(tag2idx.keys())[1], list(tag2idx.keys())[2] , list(tag2idx.keys())[3]
#, list(tag2idx.keys())[4] , list(tag2idx.keys())[5],list(tag2idx.keys())[6],list(tag2idx.keys())[7]
#,list(tag2idx.keys())[8],list(tag2idx.keys())[9],list(tag2idx.keys())[10],list(tag2idx.keys())[11],
#list(tag2idx.keys())[12],list(tag2idx.keys())[13]])
str_pythontex=[float("{0:.2f}".format(list(table_111.loc["A"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["A"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["A"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["A"])[4]*100)),
round(list(table_111.loc["A"])[3]),round(list(table_111.loc["A"])[5]),
float("{0:.2f}".format(list(table_111.loc["B"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["B"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["B"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["B"])[4]*100)),
round(list(table_111.loc["B"])[3]),round(list(table_111.loc["B"])[5]),
float("{0:.2f}".format(list(table_111.loc["C"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["C"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["C"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["C"])[4]*100)),
round(list(table_111.loc["C"])[3]),round(list(table_111.loc["C"])[5]),
float("{0:.2f}".format(list(table_111.loc["D"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["D"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["D"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["D"])[4]*100)),
round(list(table_111.loc["D"])[3]),round(list(table_111.loc["D"])[5]),
float("{0:.2f}".format(list(table_111.loc["E"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["E"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["E"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["E"])[4]*100)),
round(list(table_111.loc["E"])[3]),round(list(table_111.loc["E"])[5]),
float("{0:.2f}".format(list(table_111.loc["F"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["F"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["F"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["F"])[4]*100)),
round(list(table_111.loc["F"])[3]),round(list(table_111.loc["F"])[5]),
float("{0:.2f}".format(list(table_111.loc["G"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["G"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["G"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["G"])[4]*100)),
round(list(table_111.loc["G"])[3]),round(list(table_111.loc["G"])[5]),
float("{0:.2f}".format(list(table_111.loc["H"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["H"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["H"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["H"])[4]*100)),
round(list(table_111.loc["H"])[3]),round(list(table_111.loc["H"])[5]),
float("{0:.2f}".format(list(table_111.loc["I"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["I"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["I"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["I"])[4]*100)),
round(list(table_111.loc["I"])[3]),round(list(table_111.loc["I"])[5]),
float("{0:.2f}".format(list(table_111.loc["J"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["J"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["J"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["J"])[4]*100)),
round(list(table_111.loc["J"])[3]),round(list(table_111.loc["J"])[5]),
float("{0:.2f}".format(list(table_111.loc["K"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["K"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["K"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["K"])[4]*100)),
round(list(table_111.loc["K"])[3]),round(list(table_111.loc["K"])[5]),
float("{0:.2f}".format(list(table_111.loc["L"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["L"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["L"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["L"])[4]*100)),
round(list(table_111.loc["L"])[3]),round(list(table_111.loc["L"])[5]),
float("{0:.2f}".format(list(table_111.loc["M"])[0]*100)),float("{0:.2f}".format(list(table_111.loc["M"])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["M"])[2]*100)),float("{0:.2f}".format(list(table_111.loc["M"])[4]*100)),
round(list(table_111.loc["M"])[3]),round(list(table_111.loc["M"])[5]),
float("{0:.2f}".format(list(table_111.loc["."])[0]*100)),float("{0:.2f}".format(list(table_111.loc["."])[1]*100)),
float("{0:.2f}".format(list(table_111.loc["."])[2]*100)),float("{0:.2f}".format(list(table_111.loc["."])[4]*100)),
round(list(table_111.loc["."])[3]),round(list(table_111.loc["."])[5]),float("{0:.2f}".format(vv1))
,float("{0:.2f}".format(vv2))
,float("{0:.2f}".format(vv3))
,float("{0:.2f}".format(vv4)),round(vv5)
,float("{0:.2f}".format(vv6))
]
L=[]
for x in str_pythontex:
if math.isnan(x):
L.append('NULL')
else:
L.append(str(x))
L1=[]
i=0
for x in L:
i=i+1
if i!=5 and i!=6 and x!="NULL":
L1.append(x+" \%")
elif x=="NULL":
L1.append(x)
elif i==5:
L1.append(x)
else:
L1.append(x)
i=0
L1[-1]=L1[-1]+" \%"
| 61.008081 | 130 | 0.594324 |
import numpy as np

# ---------------------------------------------------------------------------
# Aggregate the 10 cross-validation folds (table_11 .. table_110, built
# earlier in this script) into one summary table `table_111`, then flatten it
# into `str_pythontex` / `L` / `L1` for the PythonTeX/LaTeX output.
# `pd`, `math` and the per-fold score vectors v1..v10 are defined above.
# ---------------------------------------------------------------------------

_FOLD_TABLES = [table_11, table_12, table_13, table_14, table_15,
                table_16, table_17, table_18, table_19, table_110]
_FOLD_TOTALS = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10]

# Row labels (tag classes) of the per-fold tables, in output order.
_TAG_LABELS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
               'M', '.']

# Column selectors: the first two by label (equivalent to the original
# `.accuracy` / `.f1_score` attribute access), the remaining four positional,
# exactly as the original hand-written statements did.
_COLUMNS = ['accuracy', 'f1_score', 2, 3, 4, 5]

# nanmean of every (label, column) cell across the 10 folds, label-major.
_means = [np.nanmean([t.loc[label][col] for t in _FOLD_TABLES])
          for label in _TAG_LABELS for col in _COLUMNS]

# Keep the historical scalar names a1..a84 (six consecutive values per
# label) so any later reference to them still works.
(a1, a2, a3, a4, a5, a6,
 a7, a8, a9, a10, a11, a12,
 a13, a14, a15, a16, a17, a18,
 a19, a20, a21, a22, a23, a24,
 a25, a26, a27, a28, a29, a30,
 a31, a32, a33, a34, a35, a36,
 a37, a38, a39, a40, a41, a42,
 a43, a44, a45, a46, a47, a48,
 a49, a50, a51, a52, a53, a54,
 a55, a56, a57, a58, a59, a60,
 a61, a62, a63, a64, a65, a66,
 a67, a68, a69, a70, a71, a72,
 a73, a74, a75, a76, a77, a78,
 a79, a80, a81, a82, a83, a84) = _means

# One row per label: the accuracy/f1 columns stay float, the two word-count
# columns (indices 3 and 5) are rounded to ints.
A = [[row[0], row[1], row[2], round(row[3]), row[4], round(row[5])]
     for row in (_means[i:i + 6] for i in range(0, len(_means), 6))]

# Overall (all-label) scores: plain mean over the 10 folds, per column.
vv1, vv2, vv3, vv4, vv5, vv6 = [np.mean([v[j] for v in _FOLD_TOTALS])
                                for j in range(6)]

table_111 = pd.DataFrame(
    A,
    columns=['accuracy', 'f1_score', 'accuracy for unknown words',
             'number of unknown words', 'accuracy for known words',
             'number of known words'],
    index=_TAG_LABELS)

# Flatten for PythonTeX: per label -> four percentages (x100, 2 decimals),
# then the two rounded counts; finally the six overall scores (NOT x100,
# matching the original code).
str_pythontex = []
for _label in _TAG_LABELS:
    _row = list(table_111.loc[_label])
    str_pythontex += [float("{0:.2f}".format(_row[0] * 100)),
                      float("{0:.2f}".format(_row[1] * 100)),
                      float("{0:.2f}".format(_row[2] * 100)),
                      float("{0:.2f}".format(_row[4] * 100)),
                      round(_row[3]),
                      round(_row[5])]
str_pythontex += [float("{0:.2f}".format(vv1)),
                  float("{0:.2f}".format(vv2)),
                  float("{0:.2f}".format(vv3)),
                  float("{0:.2f}".format(vv4)),
                  round(vv5),
                  float("{0:.2f}".format(vv6))]

# Stringify for LaTeX; NaN cells become the literal 'NULL'.
L = ['NULL' if math.isnan(x) else str(x) for x in str_pythontex]

# Append a LaTeX percent sign to the four percentage slots of each 6-slot
# row; slots 5 and 6 (the counts) stay bare.
L1 = []
i = 0
for x in L:
    i += 1
    if x != "NULL" and i not in (5, 6):
        L1.append(x + r" \%")
    else:
        L1.append(x)
        if i == 6 and x != "NULL":
            # Restart the 6-slot cycle.  NOTE: kept faithful to the
            # original -- a 'NULL' in slot 6 skips this reset.
            i = 0
# The final overall value is a percentage again.
L1[-1] = L1[-1] + r" \%"
| true | true |
f724c79b6742776adde045c80e5e517302744145 | 2,786 | py | Python | scripts/dct/data/GpioData.py | ABM-Community-Ports/droidboot_device_planet-cosmocom | 4e157f7f3def69cc47e2c5c8fec5346feaea2a8c | [
"MIT"
] | 10 | 2020-07-17T14:51:36.000Z | 2022-03-12T03:35:42.000Z | scripts/dct/data/GpioData.py | ABM-Community-Ports/droidboot_device_planet-cosmocom | 4e157f7f3def69cc47e2c5c8fec5346feaea2a8c | [
"MIT"
] | 6 | 2020-07-23T19:33:25.000Z | 2021-02-23T18:21:59.000Z | scripts/dct/data/GpioData.py | ABM-Community-Ports/droidboot_device_planet-cosmocom | 4e157f7f3def69cc47e2c5c8fec5346feaea2a8c | [
"MIT"
] | 4 | 2020-11-12T03:07:39.000Z | 2022-03-23T19:30:20.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
class GpioData:
    """Configuration record for one GPIO pin in the DCT data model.

    Most methods are trivial ``get_*``/``set_*`` accessors wrapping one
    private field each; only the methods documented inline carry logic.
    The class-level maps are shared lookup tables filled in by the
    surrounding tool.
    """

    # Shared, tool-wide lookup tables (populated externally).
    _count = 0
    _modNum = 8
    _specMap = {}
    _freqMap = {}
    _mapList = []
    _modeMap = {}      # key -> list of mode names; read by get_modeName()
    _smtMap = {}
    _map_table = {}    # EINT mapping table; written by set_eint_map_table()

    def __init__(self):
        # Per-pin defaults.  'smt', 'ies' and 'drvCur' look like pad
        # controls (Schmitt trigger / input enable / drive current) --
        # TODO confirm against the hardware documentation.
        self.__defMode = 0
        self.__eintMode = False
        self.__modeVec = ['0', '0', '0', '0', '0', '0', '0', '0']
        self.__inPullEn = True
        self.__inPullSelHigh = False
        # Not read anywhere in this class; ge_defDirInt() recomputes the
        # numeric direction from __defDir instead.  Kept for compatibility.
        self.__defDirInt = 0
        self.__defDir = 'IN'       # 'IN' or 'OUT'
        self.__inEn = True
        self.__outEn = False
        self.__outHigh = False
        self.__varNames = []
        self.__smtNum = -1
        self.__smtEn = False
        self.__iesEn = True
        self.__drvCur = ""

    # --- plain field accessors (one private field each) -------------------
    def get_defMode(self):
        return self.__defMode

    def set_defMode(self, mode):
        self.__defMode = mode

    def get_eintMode(self):
        return self.__eintMode

    def set_eintMode(self, flag):
        self.__eintMode = flag

    def get_modeVec(self):
        return self.__modeVec

    def set_modeVec(self, vec):
        self.__modeVec = vec

    def get_inPullEn(self):
        return self.__inPullEn

    # Note the historical lowercase 'pull' in the setter names; kept so
    # existing callers do not break.
    def set_inpullEn(self, flag):
        self.__inPullEn = flag

    def get_inPullSelHigh(self):
        return self.__inPullSelHigh

    def set_inpullSelHigh(self, flag):
        self.__inPullSelHigh = flag

    def get_defDir(self):
        return self.__defDir

    def set_defDir(self, dir):
        # 'dir' shadows the builtin; parameter name kept for API stability.
        self.__defDir = dir

    def get_inEn(self):
        return self.__inEn

    def set_inEn(self, flag):
        self.__inEn = flag

    def get_outEn(self):
        return self.__outEn

    def set_outEn(self, flag):
        self.__outEn = flag

    def get_outHigh(self):
        return self.__outHigh

    def set_outHigh(self, outHigh):
        self.__outHigh = outHigh

    def get_varNames(self):
        return self.__varNames

    def set_varNames(self, names):
        self.__varNames = names

    def set_smtEn(self, flag):
        self.__smtEn = flag

    def get_smtEn(self):
        return self.__smtEn

    def get_iesEn(self):
        return self.__iesEn

    def set_iesEn(self, flag):
        self.__iesEn = flag

    def set_drvCur(self, val):
        self.__drvCur = val

    def get_drvCur(self):
        return self.__drvCur

    def set_smtNum(self, num):
        self.__smtNum = num

    def get_smtNum(self):
        return self.__smtNum

    def ge_defDirInt(self):
        """Return the default direction as an int: 0 for 'IN', 1 otherwise.

        The method name keeps the historical ``ge_`` typo so callers
        don't break.
        """
        return 0 if self.__defDir == 'IN' else 1

    @staticmethod
    def set_eint_map_table(map_table):
        """Install the shared EINT mapping table."""
        GpioData._map_table = map_table

    @staticmethod
    def get_modeName(key, idx):
        """Return mode name ``idx`` for pin ``key``, or None if unknown."""
        if key in GpioData._modeMap:  # idiomatic membership test
            return GpioData._modeMap[key][idx]
        return None
| 21.106061 | 65 | 0.585068 |
class GpioData:
    """Configuration record for one GPIO pin (DCT data model).

    Trivial ``get_*``/``set_*`` accessors wrap one private field each;
    only the methods with inline comments below carry extra logic.
    """
    # Class-level (shared) lookup tables; filled in by the surrounding tool.
    _count = 0
    _modNum = 8
    _specMap = {}
    _freqMap = {}
    _mapList = []
    _modeMap = {}      # key -> list of mode names; read by get_modeName()
    _smtMap = {}
    _map_table = {}    # EINT mapping table; written by set_eint_map_table()
    def __init__(self):
        # Per-pin defaults.  'smt', 'ies' and 'drvCur' look like pad
        # controls (Schmitt trigger / input enable / drive current) --
        # TODO confirm against the hardware documentation.
        self.__defMode = 0
        self.__eintMode = False
        self.__modeVec = ['0', '0', '0', '0', '0', '0', '0', '0']
        self.__inPullEn = True
        self.__inPullSelHigh = False
        # Not read anywhere in this class; ge_defDirInt() recomputes the
        # numeric direction from __defDir instead.
        self.__defDirInt = 0
        self.__defDir = 'IN'    # 'IN' or 'OUT'
        self.__inEn = True
        self.__outEn = False
        self.__outHigh = False
        self.__varNames = []
        self.__smtNum = -1
        self.__smtEn = False
        self.__iesEn = True
        self.__drvCur = ""
    # --- plain field accessors (one private field each) ------------------
    def get_defMode(self):
        return self.__defMode
    def set_defMode(self, mode):
        self.__defMode = mode
    def get_eintMode(self):
        return self.__eintMode
    def set_eintMode(self, flag):
        self.__eintMode = flag
    def get_modeVec(self):
        return self.__modeVec
    def set_modeVec(self, vec):
        self.__modeVec = vec
    def get_inPullEn(self):
        return self.__inPullEn
    # Note the historical lowercase 'pull' in the two setter names below.
    def set_inpullEn(self, flag):
        self.__inPullEn = flag
    def get_inPullSelHigh(self):
        return self.__inPullSelHigh
    def set_inpullSelHigh(self, flag):
        self.__inPullSelHigh = flag
    def get_defDir(self):
        return self.__defDir
    def set_defDir(self, dir):
        # 'dir' shadows the builtin; parameter name kept as-is.
        self.__defDir = dir
    def get_inEn(self):
        return self.__inEn
    def set_inEn(self, flag):
        self.__inEn = flag
    def get_outEn(self):
        return self.__outEn
    def set_outEn(self, flag):
        self.__outEn = flag
    def get_outHigh(self):
        return self.__outHigh
    def set_outHigh(self, outHigh):
        self.__outHigh = outHigh
    def get_varNames(self):
        return self.__varNames
    def set_varNames(self, names):
        self.__varNames = names
    def set_smtEn(self, flag):
        self.__smtEn = flag
    def get_smtEn(self):
        return self.__smtEn
    def get_iesEn(self):
        return self.__iesEn
    def set_iesEn(self, flag):
        self.__iesEn = flag
    def set_drvCur(self, val):
        self.__drvCur = val
    def get_drvCur(self):
        return self.__drvCur
    def set_smtNum(self, num):
        self.__smtNum = num
    def get_smtNum(self):
        return self.__smtNum
    def ge_defDirInt(self):
        # Maps 'IN' -> 0, anything else -> 1.  Method name keeps the
        # historical 'ge_' typo.
        if self.__defDir == 'IN':
            return 0
        else:
            return 1
    @staticmethod
    def set_eint_map_table(map_table):
        # Install the shared EINT mapping table.
        GpioData._map_table = map_table
    @staticmethod
    def get_modeName(key, idx):
        # Return the idx-th mode name for `key`; falls through and returns
        # None (implicitly) when the key is absent.
        if key in GpioData._modeMap.keys():
            value = GpioData._modeMap[key]
            return value[idx]
| true | true |
f724c7bf893a319eb8f171129f7bf5a55e44dd61 | 837 | py | Python | test/fpga/spi_video_ram_test/spi_video_ram_test.py | mbalestrini/hack_soc | 157428ee6856a9e4cee5953b8b3c144b4f57f5ee | [
"Apache-2.0"
] | 1 | 2021-12-18T18:31:53.000Z | 2021-12-18T18:31:53.000Z | test/fpga/spi_video_ram_test/spi_video_ram_test.py | mbalestrini/hack_soc | 157428ee6856a9e4cee5953b8b3c144b4f57f5ee | [
"Apache-2.0"
] | null | null | null | test/fpga/spi_video_ram_test/spi_video_ram_test.py | mbalestrini/hack_soc | 157428ee6856a9e4cee5953b8b3c144b4f57f5ee | [
"Apache-2.0"
] | null | null | null | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
import random
async def reset(dut):
    """Pulse the DUT's active-low reset for 20 external-clock cycles."""
    # NOTE: "<=" is cocotb's (deprecated) signal-assignment operator on
    # handles, not a comparison.
    dut.RESET_N <= 0
    await ClockCycles(dut.EXTERNAL_CLK, 20)
    dut.RESET_N <= 1
    # One extra cycle so the DUT samples the deasserted reset.
    await ClockCycles(dut.EXTERNAL_CLK, 1)
@cocotb.test()
async def spi_video_ram_test(dut):
    """Reset the SPI video-RAM design, wait for its initial VRAM write
    phase to finish, then force the SRAM quad-SPI data pins for ten
    display-active periods."""
    # Free-running 10 us external clock driven in the background.
    # NOTE(review): cocotb.fork is deprecated in newer cocotb releases in
    # favour of cocotb.start_soon.
    clock = Clock(dut.EXTERNAL_CLK, 10, units="us")
    cocotb.fork(clock.start())
    random.seed(0)  # deterministic run
    await reset(dut)
    # Poll one clock at a time until the DUT leaves its power-up
    # "writing to VRAM" mode.
    while (dut.writing_to_vram_mode==1):
        await ClockCycles(dut.EXTERNAL_CLK, 1)
    await ClockCycles(dut.EXTERNAL_CLK, 1)
    for i in range(0, 10):
        # At the start of each visible area drive the quad-SPI data pins
        # to a fixed pattern (SIO3 high, others low), then let the frame
        # run for 2000 clocks.  Plain attribute assignment is cocotb's
        # (also deprecated) shorthand for setting a handle's value.
        await RisingEdge(dut.display_active)
        dut.SRAM_SIO0 = 0
        dut.SRAM_SIO1 = 0
        dut.SRAM_SIO2 = 0
        dut.SRAM_SIO3 = 1
        await ClockCycles(dut.EXTERNAL_CLK, 2000)
| 19.465116 | 64 | 0.659498 | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
import random
async def reset(dut):
dut.RESET_N <= 0
await ClockCycles(dut.EXTERNAL_CLK, 20)
dut.RESET_N <= 1
await ClockCycles(dut.EXTERNAL_CLK, 1)
@cocotb.test()
async def spi_video_ram_test(dut):
clock = Clock(dut.EXTERNAL_CLK, 10, units="us")
cocotb.fork(clock.start())
random.seed(0)
await reset(dut)
while (dut.writing_to_vram_mode==1):
await ClockCycles(dut.EXTERNAL_CLK, 1)
await ClockCycles(dut.EXTERNAL_CLK, 1)
for i in range(0, 10):
await RisingEdge(dut.display_active)
dut.SRAM_SIO0 = 0
dut.SRAM_SIO1 = 0
dut.SRAM_SIO2 = 0
dut.SRAM_SIO3 = 1
await ClockCycles(dut.EXTERNAL_CLK, 2000)
| true | true |
f724c85914703d848de5492a26e8b70312f96884 | 1,725 | py | Python | notest/ext/generator_mysql.py | GodQ/notest | 530d91782e8ed06493a1313facbed86e06662daf | [
"Apache-2.0"
] | 3 | 2019-05-10T09:36:07.000Z | 2021-04-16T23:40:46.000Z | notest/ext/generator_mysql.py | GodQ/notest | 530d91782e8ed06493a1313facbed86e06662daf | [
"Apache-2.0"
] | null | null | null | notest/ext/generator_mysql.py | GodQ/notest | 530d91782e8ed06493a1313facbed86e06662daf | [
"Apache-2.0"
] | 1 | 2019-05-10T09:43:48.000Z | 2019-05-10T09:43:48.000Z |
import sys
import logging
import json
# Use getLogger() so the logger participates in the standard logging
# hierarchy/configuration instead of being a detached Logger instance.
logger = logging.getLogger("mysql_generator")
from notest.lib.mysql_lib import MysqlClient
from notest.lib.utils import templated_var
from notest import generators
'''
- generators:
- task_id: {type: 'number_sequence', start: 10}
- task_name:
type: 'mysql'
query: 'select name from sites'
config: '$mysql_config'
'''
def parse_mysql_query_generator(config, variable_binds):
    """Parse configuration options for a 'mysql' generator.

    Expected keys in *config*:
        config           -- MySQL connection settings; may be a dict, a JSON
                            string, or a template variable such as
                            '$mysql_config'
        query            -- the SELECT statement to run
        return_dict_list -- when true, yield full row dicts instead of the
                            first column of every row (default False)

    Returns a fixed-sequence generator over the queried values.
    Raises ValueError when the query fails or returns no rows.
    """
    mysql_config = config.get('config')
    sql = config.get('query')
    return_dict_list = config.get('return_dict_list', False)
    mysql_config = templated_var(mysql_config, variable_binds)
    if isinstance(mysql_config, str):
        mysql_config = json.loads(mysql_config)
    # BUGFIX: resolve template placeholders in the query against the same
    # variable binds as the connection config; previously templated_var()
    # was called without variable_binds, so '$var' references inside the
    # query were never substituted.
    sql = templated_var(sql, variable_binds)
    if isinstance(return_dict_list, str):
        return_dict_list = True if return_dict_list.lower() == 'true' else False
    try:
        with MysqlClient(mysql_config) as cli:
            if return_dict_list is False:
                # Flatten single-column result tuples to plain values.
                res = cli.query(sql)
                r = list()
                for i in res:
                    if isinstance(i, tuple):
                        i = i[0]
                    r.append(i)
            else:
                r = cli.query(sql, return_dict_list=return_dict_list)
            if len(r) == 0:
                raise Exception("No data queried in MySQL by '{}'!".format(sql))
        return generators.factory_fixed_sequence(r)()
    except Exception as e:
        logger.error(str(e))
        raise ValueError("Invalid query: " + sql + " : " + str(e))


# Registry consumed by the generator loader: maps generator type -> parser.
GENERATORS = {'mysql': parse_mysql_query_generator}
import sys
import logging
import json
logger = logging.Logger("mysql_generator")
from notest.lib.mysql_lib import MysqlClient
from notest.lib.utils import templated_var
from notest import generators
def parse_mysql_query_generator(config, variable_binds):
mysql_config = config.get('config')
sql = config.get('query')
return_dict_list = config.get('return_dict_list', False)
mysql_config = templated_var(mysql_config, variable_binds)
if isinstance(mysql_config, str):
mysql_config = json.loads(mysql_config)
sql = templated_var(sql)
if isinstance(return_dict_list, str):
return_dict_list = True if return_dict_list.lower() == 'true' else False
try:
with MysqlClient(mysql_config) as cli:
r = None
if return_dict_list is False:
res = cli.query(sql)
r = list()
for i in res:
if isinstance(i, tuple):
i = i[0]
r.append(i)
else:
r = cli.query(sql, return_dict_list=return_dict_list)
if len(r) == 0:
raise Exception("No data queried in MySQL by '{}'!".format(sql))
return generators.factory_fixed_sequence(r)()
except Exception as e:
logger.error(str(e))
raise ValueError("Invalid query: " + sql + " : " + str(e))
GENERATORS = {'mysql': parse_mysql_query_generator}
| true | true |
f724c9711779d4f88a28880eec79b4a0e04ab006 | 907 | py | Python | src/coinc/exceptions.py | kimklai/Coinc | dbce0d257d90104bd012996c18884a68e01375a9 | [
"MIT"
] | 15 | 2020-07-11T23:30:23.000Z | 2022-03-25T08:10:26.000Z | src/coinc/exceptions.py | kimklai/Coinc | dbce0d257d90104bd012996c18884a68e01375a9 | [
"MIT"
] | 10 | 2020-06-26T18:20:22.000Z | 2022-03-31T02:55:29.000Z | src/coinc/exceptions.py | kimklai/Coinc | dbce0d257d90104bd012996c18884a68e01375a9 | [
"MIT"
] | 2 | 2020-09-10T10:51:01.000Z | 2021-04-11T09:08:48.000Z | # -*- coding: utf-8 -*-
"""Exceptions used in this module"""
class CoincError(Exception):
    """Root of the Coinc exception hierarchy.

    All errors raised by this workflow derive from this class so callers
    can catch every Coinc failure with a single except clause.
    """
class ConfigError(CoincError):
    """Signals an invalid value in the configuration sheet."""
class QueryError(CoincError):
    """Signals that an invalid query was given."""
class AppIDError(CoincError):
    """Signals that the configured App ID cannot be used."""
class ApiError(CoincError):
    """Signals that the API is unreachable or returned a bad response."""
class UnknownPythonError(CoincError):
    """Signals that the Python runtime version could not be detected."""
| 15.912281 | 72 | 0.63065 |
class CoincError(Exception):
pass
class ConfigError(CoincError):
pass
class QueryError(CoincError):
pass
class AppIDError(CoincError):
pass
class ApiError(CoincError):
pass
class UnknownPythonError(CoincError):
pass
| true | true |
f724c9cbe59430da0dd2210d4efb6ddff77348cb | 9,522 | py | Python | tessia/server/db/alembic/versions/4f32ee5b2d29_0_0_3_remove_os_from_template_add_.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | 5 | 2020-06-04T10:20:33.000Z | 2020-10-26T15:09:19.000Z | tessia/server/db/alembic/versions/4f32ee5b2d29_0_0_3_remove_os_from_template_add_.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | null | null | null | tessia/server/db/alembic/versions/4f32ee5b2d29_0_0_3_remove_os_from_template_add_.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=all
"""0.0.3 remove os from template, add template to operating_systems
Revision ID: 4f32ee5b2d29
Revises: 14e7934c17c8
Create Date: 2018-03-15 13:39:57.863743
"""
# revision identifiers, used by Alembic.
revision = '4f32ee5b2d29'
down_revision = '14e7934c17c8'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy import Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Boolean, Integer, LargeBinary, String
import os
import sqlalchemy as sa
SESSION = sessionmaker()
BASE = declarative_base()
# declare the models used in this migration
class CommonMixin(object):
    """
    Helper mixin to set attributes common to most classes
    """
    # Surrogate integer primary key shared by every model declared in
    # this migration (the column name intentionally shadows the builtin).
    id = Column(Integer, primary_key=True)
# CommonMixin
def upgrade():
    """Schema + data migration: move the install-template link from the
    templates table onto operating_systems, update the seeded OS rows and
    refresh the shipped template contents from disk."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('operating_systems', sa.Column('template_id', sa.Integer(), nullable=True))
    op.create_foreign_key(op.f('fk_operating_systems_template_id_templates'), 'operating_systems', 'templates', ['template_id'], ['id'])
    op.drop_column('operating_systems', 'cmdline')
    op.alter_column('operating_systems', 'desc', new_column_name='pretty_name')
    op.alter_column('templates', 'content',
                    existing_type=sa.VARCHAR(),
                    nullable=False)
    op.drop_index('ix_templates_operating_system_id', table_name='templates')
    op.drop_constraint('fk_templates_operating_system_id_operating_systems', 'templates', type_='foreignkey')
    op.drop_column('templates', 'operating_system_id')
    # ### end Alembic commands ###

    # Models are declared inside the function so they are only mapped
    # when this revision actually runs.
    class OperatingSystem(CommonMixin, BASE):
        """A supported operating system"""
        __tablename__ = 'operating_systems'
        name = Column(String, unique=True, index=True)
        type = Column(String, nullable=False)
        major = Column(Integer, nullable=False)
        minor = Column(Integer, nullable=False)
        pretty_name = Column(String, nullable=False)
        # default auto install template
        template_id = Column(Integer, ForeignKey('templates.id'))
    # OperatingSystem

    class Template(CommonMixin, BASE):
        """A template for a InstallMachine"""
        __tablename__ = "templates"
        name = Column(String, unique=True, nullable=False)
        content = Column(String, nullable=False)
        desc = Column(String)
    # Template

    # data migration
    session = SESSION(bind=op.get_bind())

    # update templates to new names and descriptions, re-reading the
    # template bodies shipped with this version from disk
    templates_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../../templates/")
    update_templates = {
        'RHEL7.2': (
            'rhel7-default', 'Default template for RHEL7 installations'),
        'SLES12.1': (
            'sles12-default', 'Default template for SLES12 installations'),
        'UBUNTU16.04.1': (
            'ubuntu16-default', 'Default template for Ubuntu16 installations'),
    }
    for key, value in update_templates.items():
        temp_obj = session.query(Template).filter_by(name=key).one()
        temp_obj.name = value[0]
        temp_obj.desc = value[1]
        template_path = '{}.jinja'.format(value[0])
        with open(templates_dir + '/' + template_path, "r") as template_file:
            temp_obj.content = template_file.read()

    # update existing oses to new type, pretty name, template
    update_oses = {
        'rhel7.2': ('redhat', 'Red Hat Enterprise Linux Server 7.2 (Maipo)', 'rhel7-default'),
        'sles12.1': ('suse', 'SUSE Linux Enterprise Server 12 SP1', 'sles12-default'),
        'ubuntu16.04.1': ('debian', 'Ubuntu 16.04.1 LTS', 'ubuntu16-default'),
    }
    for key, value in update_oses.items():
        temp_obj = session.query(Template).filter_by(name=value[2]).one()
        os_obj = session.query(OperatingSystem).filter_by(name=key).one()
        os_obj.type = value[0]
        if key == 'ubuntu16.04.1':
            # Ubuntu releases are stored as major=YYMM, minor=point release.
            os_obj.major = 1604
            os_obj.minor = 1
        os_obj.pretty_name = value[1]
        os_obj.template_id = temp_obj.id

    # insert new oses; rows are name,type,major,minor,pretty_name,template
    new_oses = [
        'cms,cms,0,0,z/VM Conversational Monitor System (CMS),,',
        'rhel7.3,redhat,7,3,Red Hat Enterprise Linux Server 7.3 (Maipo),rhel7-default',
        'rhel7.4,redhat,7,4,Red Hat Enterprise Linux Server 7.4 (Maipo),rhel7-default',
        'sles12.2,suse,12,2,SUSE Linux Enterprise Server 12 SP2,sles12-default',
        'sles12.3,suse,12,3,SUSE Linux Enterprise Server 12 SP3,sles12-default',
        'ubuntu16.04.2,debian,1604,2,Ubuntu 16.04.2 LTS,ubuntu16-default',
        'ubuntu16.04.3,debian,1604,3,Ubuntu 16.04.3 LTS,ubuntu16-default',
    ]
    for row in new_oses:
        row = row.split(',', 6)
        if row[0] == 'cms':
            # CMS has no auto-install template.
            template = None
        else:
            temp_obj = session.query(Template).filter_by(name=row[5]).one()
            template = temp_obj.id
        os_obj = session.query(OperatingSystem).filter_by(name=row[0]).one_or_none()
        if not os_obj:
            os_obj = OperatingSystem()
        # BUGFIX: the original line ended with a trailing comma
        # ("os_obj.name = row[0],") which stored the name as a one-element
        # tuple instead of a plain string.
        os_obj.name = row[0]
        os_obj.type = row[1]
        os_obj.major = row[2]
        os_obj.minor = row[3]
        os_obj.pretty_name = row[4]
        os_obj.template_id = template
        session.add(os_obj)

    session.commit()
# upgrade
def downgrade():
    """Revert the migration performed by upgrade(): restore the
    operating_system link on templates, the cmdline column, and the old
    naming of the seeded OS/template rows."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('templates', sa.Column('operating_system_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key('fk_templates_operating_system_id_operating_systems', 'templates', 'operating_systems', ['operating_system_id'], ['id'])
    op.create_index('ix_templates_operating_system_id', 'templates', ['operating_system_id'], unique=False)
    op.alter_column('templates', 'content',
                    existing_type=sa.VARCHAR(),
                    nullable=True)
    op.alter_column('operating_systems', 'pretty_name', new_column_name='desc')
    op.add_column('operating_systems', sa.Column('cmdline', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_constraint(op.f('fk_operating_systems_template_id_templates'), 'operating_systems', type_='foreignkey')
    op.drop_column('operating_systems', 'template_id')
    # ### end Alembic commands ###
    # Models for the *old* schema, declared locally so they are only
    # mapped when this revision runs.
    class OldOperatingSystem(CommonMixin, BASE):
        """Downgrade version of operating system"""
        __tablename__ = 'operating_systems'
        name = Column(String, unique=True, index=True)
        type = Column(String, nullable=False)
        major = Column(Integer, nullable=False)
        minor = Column(Integer, nullable=False)
        desc = Column(String, nullable=False)
        cmdline = Column(String)
    # OldOperatingSystem
    class OldTemplate(CommonMixin, BASE):
        """The downgrade version of template"""
        __tablename__ = "templates"
        name = Column(String, unique=True, nullable=False)
        content = Column(String, nullable=False)
        desc = Column(String)
        operating_system_id = Column(
            Integer, ForeignKey('operating_systems.id'), index=True)
    # OldTemplate
    # data revert
    session = SESSION(bind=op.get_bind())
    # set templates to old name and description, and re-link each template
    # to its operating system row
    update_templates = {
        'rhel7-default': ('RHEL7.2', 'Template for RHEL7.2', 'rhel7.2'),
        'sles12-default': ('SLES12.1', 'Template for SLES12.1', 'sles12.1'),
        'ubuntu16-default': ('UBUNTU16.04.1', 'Template for Ubuntu 16.04.1', 'ubuntu16.04.1'),
    }
    for key, value in update_templates.items():
        os_obj = session.query(OldOperatingSystem).filter_by(name=value[2]).one()
        temp_obj = session.query(OldTemplate).filter_by(name=key).one()
        temp_obj.name = value[0]
        temp_obj.desc = value[1]
        temp_obj.operating_system_id = os_obj.id
    # set oses back to old type and description; note the templates dir
    # here is the old location under state_machines/autoinstall
    templates_dir = os.path.abspath(
        os.path.dirname(os.path.abspath(__file__)) +
        "/../../../state_machines/autoinstall/templates")
    update_oses = {
        'rhel7.2': ('rhel', 'RHEL 7.2 GA', 'rhel7-default'),
        'sles12.1': ('sles', 'SLES 12.1', 'sles12-default'),
        'ubuntu16.04.1': ('ubuntu', 'Ubuntu 16.04.1', 'ubuntu16-default'),
    }
    for key, value in update_oses.items():
        os_obj = session.query(OldOperatingSystem).filter_by(name=key).one()
        # keep the *new* type name: it selects which cmdline template file
        # to read back from disk below
        new_type = os_obj.type
        os_obj.type = value[0]
        if key == 'ubuntu16.04.1':
            # revert Ubuntu version encoding from major=1604/minor=1
            os_obj.major = 16
            os_obj.minor = 4
        os_obj.desc = value[1]
        cmdline_template_path = '{}.cmdline.jinja'.format(new_type)
        with open(templates_dir + '/' + cmdline_template_path, "r") as cmdline_file:
            os_obj.cmdline = cmdline_file.read()
    session.commit()
# downgrade
| 40.347458 | 146 | 0.664251 |
revision = '4f32ee5b2d29'
down_revision = '14e7934c17c8'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy import Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Boolean, Integer, LargeBinary, String
import os
import sqlalchemy as sa
SESSION = sessionmaker()
BASE = declarative_base()
class CommonMixin(object):
id = Column(Integer, primary_key=True)
def upgrade():
ing_systems', 'templates', ['template_id'], ['id'])
op.drop_column('operating_systems', 'cmdline')
op.alter_column('operating_systems', 'desc', new_column_name='pretty_name')
op.alter_column('templates', 'content',
existing_type=sa.VARCHAR(),
nullable=False)
op.drop_index('ix_templates_operating_system_id', table_name='templates')
op.drop_constraint('fk_templates_operating_system_id_operating_systems', 'templates', type_='foreignkey')
op.drop_column('templates', 'operating_system_id')
tems'
name = Column(String, unique=True, index=True)
type = Column(String, nullable=False)
major = Column(Integer, nullable=False)
minor = Column(Integer, nullable=False)
pretty_name = Column(String, nullable=False)
template_id = Column(Integer, ForeignKey('templates.id'))
class Template(CommonMixin, BASE):
__tablename__ = "templates"
name = Column(String, unique=True, nullable=False)
content = Column(String, nullable=False)
desc = Column(String)
session = SESSION(bind=op.get_bind())
templates_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../../templates/")
update_templates = {
'RHEL7.2': (
'rhel7-default', 'Default template for RHEL7 installations'),
'SLES12.1': (
'sles12-default', 'Default template for SLES12 installations'),
'UBUNTU16.04.1': (
'ubuntu16-default', 'Default template for Ubuntu16 installations'),
}
for key, value in update_templates.items():
temp_obj = session.query(Template).filter_by(name=key).one()
temp_obj.name = value[0]
temp_obj.desc = value[1]
template_path = '{}.jinja'.format(value[0])
with open(templates_dir + '/' + template_path, "r") as template_file:
temp_obj.content = template_file.read()
update_oses = {
'rhel7.2': ('redhat', 'Red Hat Enterprise Linux Server 7.2 (Maipo)', 'rhel7-default'),
'sles12.1': ('suse', 'SUSE Linux Enterprise Server 12 SP1', 'sles12-default'),
'ubuntu16.04.1': ('debian', 'Ubuntu 16.04.1 LTS', 'ubuntu16-default'),
}
for key, value in update_oses.items():
temp_obj = session.query(Template).filter_by(name=value[2]).one()
os_obj = session.query(OperatingSystem).filter_by(name=key).one()
os_obj.type = value[0]
if key == 'ubuntu16.04.1':
os_obj.major = 1604
os_obj.minor = 1
os_obj.pretty_name = value[1]
os_obj.template_id = temp_obj.id
new_oses = [
'cms,cms,0,0,z/VM Conversational Monitor System (CMS),,',
'rhel7.3,redhat,7,3,Red Hat Enterprise Linux Server 7.3 (Maipo),rhel7-default',
'rhel7.4,redhat,7,4,Red Hat Enterprise Linux Server 7.4 (Maipo),rhel7-default',
'sles12.2,suse,12,2,SUSE Linux Enterprise Server 12 SP2,sles12-default',
'sles12.3,suse,12,3,SUSE Linux Enterprise Server 12 SP3,sles12-default',
'ubuntu16.04.2,debian,1604,2,Ubuntu 16.04.2 LTS,ubuntu16-default',
'ubuntu16.04.3,debian,1604,3,Ubuntu 16.04.3 LTS,ubuntu16-default',
]
for row in new_oses:
row = row.split(',', 6)
if row[0] == 'cms':
template = None
else:
temp_obj = session.query(Template).filter_by(name=row[5]).one()
template = temp_obj.id
os_obj = session.query(OperatingSystem).filter_by(name=row[0]).one_or_none()
if not os_obj:
os_obj = OperatingSystem()
os_obj.name = row[0],
os_obj.type = row[1]
os_obj.major = row[2]
os_obj.minor = row[3]
os_obj.pretty_name = row[4]
os_obj.template_id = template
session.add(os_obj)
session.commit()
def downgrade():
ating_systems', 'templates', 'operating_systems', ['operating_system_id'], ['id'])
op.create_index('ix_templates_operating_system_id', 'templates', ['operating_system_id'], unique=False)
op.alter_column('templates', 'content',
existing_type=sa.VARCHAR(),
nullable=True)
op.alter_column('operating_systems', 'pretty_name', new_column_name='desc')
op.add_column('operating_systems', sa.Column('cmdline', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint(op.f('fk_operating_systems_template_id_templates'), 'operating_systems', type_='foreignkey')
op.drop_column('operating_systems', 'template_id')
systems'
name = Column(String, unique=True, index=True)
type = Column(String, nullable=False)
major = Column(Integer, nullable=False)
minor = Column(Integer, nullable=False)
desc = Column(String, nullable=False)
cmdline = Column(String)
class OldTemplate(CommonMixin, BASE):
__tablename__ = "templates"
name = Column(String, unique=True, nullable=False)
content = Column(String, nullable=False)
desc = Column(String)
operating_system_id = Column(
Integer, ForeignKey('operating_systems.id'), index=True)
session = SESSION(bind=op.get_bind())
update_templates = {
'rhel7-default': ('RHEL7.2', 'Template for RHEL7.2', 'rhel7.2'),
'sles12-default': ('SLES12.1', 'Template for SLES12.1', 'sles12.1'),
'ubuntu16-default': ('UBUNTU16.04.1', 'Template for Ubuntu 16.04.1', 'ubuntu16.04.1'),
}
for key, value in update_templates.items():
os_obj = session.query(OldOperatingSystem).filter_by(name=value[2]).one()
temp_obj = session.query(OldTemplate).filter_by(name=key).one()
temp_obj.name = value[0]
temp_obj.desc = value[1]
temp_obj.operating_system_id = os_obj.id
templates_dir = os.path.abspath(
os.path.dirname(os.path.abspath(__file__)) +
"/../../../state_machines/autoinstall/templates")
update_oses = {
'rhel7.2': ('rhel', 'RHEL 7.2 GA', 'rhel7-default'),
'sles12.1': ('sles', 'SLES 12.1', 'sles12-default'),
'ubuntu16.04.1': ('ubuntu', 'Ubuntu 16.04.1', 'ubuntu16-default'),
}
for key, value in update_oses.items():
os_obj = session.query(OldOperatingSystem).filter_by(name=key).one()
new_type = os_obj.type
os_obj.type = value[0]
if key == 'ubuntu16.04.1':
os_obj.major = 16
os_obj.minor = 4
os_obj.desc = value[1]
cmdline_template_path = '{}.cmdline.jinja'.format(new_type)
with open(templates_dir + '/' + cmdline_template_path, "r") as cmdline_file:
os_obj.cmdline = cmdline_file.read()
session.commit()
| true | true |
f724c9e936f9b464bc9ef938bd84202c5c01e1e8 | 6,935 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import ApplicationGatewaysOperations
from ._operations import ApplicationSecurityGroupsOperations
from ._operations import AvailableDelegationsOperations
from ._operations import AvailableResourceGroupDelegationsOperations
from ._operations import AzureFirewallsOperations
from ._operations import AzureFirewallFqdnTagsOperations
from ._operations import NetworkManagementClientOperationsMixin
from ._operations import DdosProtectionPlansOperations
from ._operations import AvailableEndpointServicesOperations
from ._operations import ExpressRouteCircuitAuthorizationsOperations
from ._operations import ExpressRouteCircuitPeeringsOperations
from ._operations import ExpressRouteCircuitConnectionsOperations
from ._operations import ExpressRouteCircuitsOperations
from ._operations import ExpressRouteServiceProvidersOperations
from ._operations import ExpressRouteCrossConnectionsOperations
from ._operations import ExpressRouteCrossConnectionPeeringsOperations
from ._operations import ExpressRouteGatewaysOperations
from ._operations import ExpressRouteConnectionsOperations
from ._operations import ExpressRoutePortsLocationsOperations
from ._operations import ExpressRoutePortsOperations
from ._operations import ExpressRouteLinksOperations
from ._operations import InterfaceEndpointsOperations
from ._operations import LoadBalancersOperations
from ._operations import LoadBalancerBackendAddressPoolsOperations
from ._operations import LoadBalancerFrontendIPConfigurationsOperations
from ._operations import InboundNatRulesOperations
from ._operations import LoadBalancerLoadBalancingRulesOperations
from ._operations import LoadBalancerOutboundRulesOperations
from ._operations import LoadBalancerNetworkInterfacesOperations
from ._operations import LoadBalancerProbesOperations
from ._operations import NetworkInterfacesOperations
from ._operations import NetworkInterfaceIPConfigurationsOperations
from ._operations import NetworkInterfaceLoadBalancersOperations
from ._operations import NetworkInterfaceTapConfigurationsOperations
from ._operations import NetworkProfilesOperations
from ._operations import NetworkSecurityGroupsOperations
from ._operations import SecurityRulesOperations
from ._operations import DefaultSecurityRulesOperations
from ._operations import NetworkWatchersOperations
from ._operations import PacketCapturesOperations
from ._operations import ConnectionMonitorsOperations
from ._operations import Operations
from ._operations import PublicIPAddressesOperations
from ._operations import PublicIPPrefixesOperations
from ._operations import RouteFiltersOperations
from ._operations import RouteFilterRulesOperations
from ._operations import RouteTablesOperations
from ._operations import RoutesOperations
from ._operations import BgpServiceCommunitiesOperations
from ._operations import ServiceEndpointPoliciesOperations
from ._operations import ServiceEndpointPolicyDefinitionsOperations
from ._operations import UsagesOperations
from ._operations import VirtualNetworksOperations
from ._operations import SubnetsOperations
from ._operations import VirtualNetworkPeeringsOperations
from ._operations import VirtualNetworkTapsOperations
from ._operations import VirtualNetworkGatewaysOperations
from ._operations import VirtualNetworkGatewayConnectionsOperations
from ._operations import LocalNetworkGatewaysOperations
from ._operations import VirtualWansOperations
from ._operations import VpnSitesOperations
from ._operations import VpnSitesConfigurationOperations
from ._operations import VirtualHubsOperations
from ._operations import HubVirtualNetworkConnectionsOperations
from ._operations import VpnGatewaysOperations
from ._operations import VpnConnectionsOperations
from ._operations import P2SVpnServerConfigurationsOperations
from ._operations import P2SVpnGatewaysOperations
__all__ = [
'ApplicationGatewaysOperations',
'ApplicationSecurityGroupsOperations',
'AvailableDelegationsOperations',
'AvailableResourceGroupDelegationsOperations',
'AzureFirewallsOperations',
'AzureFirewallFqdnTagsOperations',
'NetworkManagementClientOperationsMixin',
'DdosProtectionPlansOperations',
'AvailableEndpointServicesOperations',
'ExpressRouteCircuitAuthorizationsOperations',
'ExpressRouteCircuitPeeringsOperations',
'ExpressRouteCircuitConnectionsOperations',
'ExpressRouteCircuitsOperations',
'ExpressRouteServiceProvidersOperations',
'ExpressRouteCrossConnectionsOperations',
'ExpressRouteCrossConnectionPeeringsOperations',
'ExpressRouteGatewaysOperations',
'ExpressRouteConnectionsOperations',
'ExpressRoutePortsLocationsOperations',
'ExpressRoutePortsOperations',
'ExpressRouteLinksOperations',
'InterfaceEndpointsOperations',
'LoadBalancersOperations',
'LoadBalancerBackendAddressPoolsOperations',
'LoadBalancerFrontendIPConfigurationsOperations',
'InboundNatRulesOperations',
'LoadBalancerLoadBalancingRulesOperations',
'LoadBalancerOutboundRulesOperations',
'LoadBalancerNetworkInterfacesOperations',
'LoadBalancerProbesOperations',
'NetworkInterfacesOperations',
'NetworkInterfaceIPConfigurationsOperations',
'NetworkInterfaceLoadBalancersOperations',
'NetworkInterfaceTapConfigurationsOperations',
'NetworkProfilesOperations',
'NetworkSecurityGroupsOperations',
'SecurityRulesOperations',
'DefaultSecurityRulesOperations',
'NetworkWatchersOperations',
'PacketCapturesOperations',
'ConnectionMonitorsOperations',
'Operations',
'PublicIPAddressesOperations',
'PublicIPPrefixesOperations',
'RouteFiltersOperations',
'RouteFilterRulesOperations',
'RouteTablesOperations',
'RoutesOperations',
'BgpServiceCommunitiesOperations',
'ServiceEndpointPoliciesOperations',
'ServiceEndpointPolicyDefinitionsOperations',
'UsagesOperations',
'VirtualNetworksOperations',
'SubnetsOperations',
'VirtualNetworkPeeringsOperations',
'VirtualNetworkTapsOperations',
'VirtualNetworkGatewaysOperations',
'VirtualNetworkGatewayConnectionsOperations',
'LocalNetworkGatewaysOperations',
'VirtualWansOperations',
'VpnSitesOperations',
'VpnSitesConfigurationOperations',
'VirtualHubsOperations',
'HubVirtualNetworkConnectionsOperations',
'VpnGatewaysOperations',
'VpnConnectionsOperations',
'P2SVpnServerConfigurationsOperations',
'P2SVpnGatewaysOperations',
]
| 46.858108 | 94 | 0.839366 |
from ._operations import ApplicationGatewaysOperations
from ._operations import ApplicationSecurityGroupsOperations
from ._operations import AvailableDelegationsOperations
from ._operations import AvailableResourceGroupDelegationsOperations
from ._operations import AzureFirewallsOperations
from ._operations import AzureFirewallFqdnTagsOperations
from ._operations import NetworkManagementClientOperationsMixin
from ._operations import DdosProtectionPlansOperations
from ._operations import AvailableEndpointServicesOperations
from ._operations import ExpressRouteCircuitAuthorizationsOperations
from ._operations import ExpressRouteCircuitPeeringsOperations
from ._operations import ExpressRouteCircuitConnectionsOperations
from ._operations import ExpressRouteCircuitsOperations
from ._operations import ExpressRouteServiceProvidersOperations
from ._operations import ExpressRouteCrossConnectionsOperations
from ._operations import ExpressRouteCrossConnectionPeeringsOperations
from ._operations import ExpressRouteGatewaysOperations
from ._operations import ExpressRouteConnectionsOperations
from ._operations import ExpressRoutePortsLocationsOperations
from ._operations import ExpressRoutePortsOperations
from ._operations import ExpressRouteLinksOperations
from ._operations import InterfaceEndpointsOperations
from ._operations import LoadBalancersOperations
from ._operations import LoadBalancerBackendAddressPoolsOperations
from ._operations import LoadBalancerFrontendIPConfigurationsOperations
from ._operations import InboundNatRulesOperations
from ._operations import LoadBalancerLoadBalancingRulesOperations
from ._operations import LoadBalancerOutboundRulesOperations
from ._operations import LoadBalancerNetworkInterfacesOperations
from ._operations import LoadBalancerProbesOperations
from ._operations import NetworkInterfacesOperations
from ._operations import NetworkInterfaceIPConfigurationsOperations
from ._operations import NetworkInterfaceLoadBalancersOperations
from ._operations import NetworkInterfaceTapConfigurationsOperations
from ._operations import NetworkProfilesOperations
from ._operations import NetworkSecurityGroupsOperations
from ._operations import SecurityRulesOperations
from ._operations import DefaultSecurityRulesOperations
from ._operations import NetworkWatchersOperations
from ._operations import PacketCapturesOperations
from ._operations import ConnectionMonitorsOperations
from ._operations import Operations
from ._operations import PublicIPAddressesOperations
from ._operations import PublicIPPrefixesOperations
from ._operations import RouteFiltersOperations
from ._operations import RouteFilterRulesOperations
from ._operations import RouteTablesOperations
from ._operations import RoutesOperations
from ._operations import BgpServiceCommunitiesOperations
from ._operations import ServiceEndpointPoliciesOperations
from ._operations import ServiceEndpointPolicyDefinitionsOperations
from ._operations import UsagesOperations
from ._operations import VirtualNetworksOperations
from ._operations import SubnetsOperations
from ._operations import VirtualNetworkPeeringsOperations
from ._operations import VirtualNetworkTapsOperations
from ._operations import VirtualNetworkGatewaysOperations
from ._operations import VirtualNetworkGatewayConnectionsOperations
from ._operations import LocalNetworkGatewaysOperations
from ._operations import VirtualWansOperations
from ._operations import VpnSitesOperations
from ._operations import VpnSitesConfigurationOperations
from ._operations import VirtualHubsOperations
from ._operations import HubVirtualNetworkConnectionsOperations
from ._operations import VpnGatewaysOperations
from ._operations import VpnConnectionsOperations
from ._operations import P2SVpnServerConfigurationsOperations
from ._operations import P2SVpnGatewaysOperations
# Explicit public API of this operations package; mirrors the imports above so
# that `from <package>.operations import *` re-exports exactly these classes.
__all__ = [
    'ApplicationGatewaysOperations',
    'ApplicationSecurityGroupsOperations',
    'AvailableDelegationsOperations',
    'AvailableResourceGroupDelegationsOperations',
    'AzureFirewallsOperations',
    'AzureFirewallFqdnTagsOperations',
    'NetworkManagementClientOperationsMixin',
    'DdosProtectionPlansOperations',
    'AvailableEndpointServicesOperations',
    'ExpressRouteCircuitAuthorizationsOperations',
    'ExpressRouteCircuitPeeringsOperations',
    'ExpressRouteCircuitConnectionsOperations',
    'ExpressRouteCircuitsOperations',
    'ExpressRouteServiceProvidersOperations',
    'ExpressRouteCrossConnectionsOperations',
    'ExpressRouteCrossConnectionPeeringsOperations',
    'ExpressRouteGatewaysOperations',
    'ExpressRouteConnectionsOperations',
    'ExpressRoutePortsLocationsOperations',
    'ExpressRoutePortsOperations',
    'ExpressRouteLinksOperations',
    'InterfaceEndpointsOperations',
    'LoadBalancersOperations',
    'LoadBalancerBackendAddressPoolsOperations',
    'LoadBalancerFrontendIPConfigurationsOperations',
    'InboundNatRulesOperations',
    'LoadBalancerLoadBalancingRulesOperations',
    'LoadBalancerOutboundRulesOperations',
    'LoadBalancerNetworkInterfacesOperations',
    'LoadBalancerProbesOperations',
    'NetworkInterfacesOperations',
    'NetworkInterfaceIPConfigurationsOperations',
    'NetworkInterfaceLoadBalancersOperations',
    'NetworkInterfaceTapConfigurationsOperations',
    'NetworkProfilesOperations',
    'NetworkSecurityGroupsOperations',
    'SecurityRulesOperations',
    'DefaultSecurityRulesOperations',
    'NetworkWatchersOperations',
    'PacketCapturesOperations',
    'ConnectionMonitorsOperations',
    'Operations',
    'PublicIPAddressesOperations',
    'PublicIPPrefixesOperations',
    'RouteFiltersOperations',
    'RouteFilterRulesOperations',
    'RouteTablesOperations',
    'RoutesOperations',
    'BgpServiceCommunitiesOperations',
    'ServiceEndpointPoliciesOperations',
    'ServiceEndpointPolicyDefinitionsOperations',
    'UsagesOperations',
    'VirtualNetworksOperations',
    'SubnetsOperations',
    'VirtualNetworkPeeringsOperations',
    'VirtualNetworkTapsOperations',
    'VirtualNetworkGatewaysOperations',
    'VirtualNetworkGatewayConnectionsOperations',
    'LocalNetworkGatewaysOperations',
    'VirtualWansOperations',
    'VpnSitesOperations',
    'VpnSitesConfigurationOperations',
    'VirtualHubsOperations',
    'HubVirtualNetworkConnectionsOperations',
    'VpnGatewaysOperations',
    'VpnConnectionsOperations',
    'P2SVpnServerConfigurationsOperations',
    'P2SVpnGatewaysOperations',
]
| true | true |
f724cac7525d981babd6c466078597e54db40a89 | 1,080 | py | Python | step2_run_perl_RT_fdda_reformat_obsnud.py | M2LabOrg/WRF_little_r | 8f46e733387db4c62f39426a03b6a03b3b406b0e | [
"Apache-2.0"
] | 1 | 2021-09-14T06:41:02.000Z | 2021-09-14T06:41:02.000Z | step2_run_perl_RT_fdda_reformat_obsnud.py | M2LabOrg/WRF_little_r | 8f46e733387db4c62f39426a03b6a03b3b406b0e | [
"Apache-2.0"
] | null | null | null | step2_run_perl_RT_fdda_reformat_obsnud.py | M2LabOrg/WRF_little_r | 8f46e733387db4c62f39426a03b6a03b3b406b0e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
This script creates WRF ready files using the little_r formatted files.
They are part of the input files needed for observation nudging.
Note that:
- You need to step1_run_time_series_converter.py first
- Here we convert to the format needed by WRF
- You do that by running:
$ perl RT_fdda_reformat_obsnud.pl OUTPUT/obs:2021-04-14_02
$ perl RT_fdda_reformat_obsnud.pl OUTPUT/obs:2021-04-14_03
$ (and so on)
- This will produce files with extension .obsnud, which you will concatenate
(see example below)
- You will also need to change the file name to OBS_DOMAIN101 for domain 1,
and OBS_DOMAIN201 for domain 2, and so on, as described in the WRF Users' manual
$ cat *.obsnud >> OBS_DOMAIN101
Adapted here by: Michel Mesquita, Ph.D. (July 2021)
"""
import os
import glob

# Reformat every little_r file produced by step 1 into WRF obs-nudging format.
# The perl helper writes a companion *.obsnud file for each input.
# (The unused `filename = os.path.basename(filepath)` local was removed.)
for filepath in glob.iglob('OUTPUT_STEP1/*'):
    print(filepath)
    # filepath comes from a local glob, not untrusted input, so shelling out
    # keeps the original workflow intact.
    os.system(f"perl RT_fdda_reformat_obsnud.pl {filepath}")

# Collect results and drop temporaries. Shell globbing is required here,
# which is why os.system is used rather than subprocess with an argv list.
os.system("mv OUTPUT_STEP1/*.obsnud OUTPUT_STEP2/")
os.system("rm OUTPUT_STEP1/*.tmp")
| 28.421053 | 82 | 0.746296 |
import os
import glob
for filepath in glob.iglob('OUTPUT_STEP1/*'):
print(filepath)
filename = os.path.basename(filepath)
os.system(f"perl RT_fdda_reformat_obsnud.pl {filepath}")
os.system("mv OUTPUT_STEP1/*.obsnud OUTPUT_STEP2/")
os.system("rm OUTPUT_STEP1/*.tmp")
| true | true |
f724cad0080defcb0f50376906f3de9ab0cedd9e | 440 | py | Python | invenio_i18n/version.py | mvidalgarcia/invenio-i18n | 123b3db1538529ebb5eff165802d387d3337c7d1 | [
"MIT"
] | null | null | null | invenio_i18n/version.py | mvidalgarcia/invenio-i18n | 123b3db1538529ebb5eff165802d387d3337c7d1 | [
"MIT"
] | null | null | null | invenio_i18n/version.py | mvidalgarcia/invenio-i18n | 123b3db1538529ebb5eff165802d387d3337c7d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2020 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for Invenio-I18N.
This file is imported by ``invenio_i18n.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
# Package version string; parsed by setup.py at build time.
__version__ = '1.2.0'
| 24.444444 | 72 | 0.722727 |
from __future__ import absolute_import, print_function
__version__ = '1.2.0'
| true | true |
f724cb2af35070c9347e3228b9791a0c3e03873f | 13,189 | py | Python | test/functional/test_framework/wallet.py | dathx/bitcoin | 57982f419e36d0023c83af2dd0d683ca3160dc2a | [
"MIT"
] | 459 | 2015-09-25T22:46:28.000Z | 2022-02-27T18:01:48.000Z | test/functional/test_framework/wallet.py | dathx/bitcoin | 57982f419e36d0023c83af2dd0d683ca3160dc2a | [
"MIT"
] | 472 | 2015-09-17T09:42:03.000Z | 2022-03-29T05:29:04.000Z | test/functional/test_framework/wallet.py | dathx/bitcoin | 57982f419e36d0023c83af2dd0d683ca3160dc2a | [
"MIT"
] | 209 | 2015-10-04T00:49:49.000Z | 2022-03-24T11:05:09.000Z | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""A limited-functionality wallet, which may replace a real wallet in tests"""
from copy import deepcopy
from decimal import Decimal
from enum import Enum
from random import choice
from typing import Optional
from test_framework.address import create_deterministic_address_bcrt1_p2tr_op_true
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
tx_from_hex,
)
from test_framework.script import (
CScript,
LegacySignatureHash,
LEAF_VERSION_TAPSCRIPT,
OP_NOP,
OP_TRUE,
SIGHASH_ALL,
)
from test_framework.script_util import (
key_to_p2pk_script,
key_to_p2wpkh_script,
)
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
# Default transaction fee (in BTC) used by the chain-building helpers below.
DEFAULT_FEE = Decimal("0.0001")
class MiniWalletMode(Enum):
    """Determines the transaction type the MiniWallet is creating and spending.

    For most purposes, the default mode ADDRESS_OP_TRUE should be sufficient;
    it simply uses a fixed bech32m P2TR address whose coins are spent with a
    witness stack of OP_TRUE, i.e. following an anyone-can-spend policy.
    However, if the transactions need to be modified by the user (e.g. prepending
    scriptSig for testing opcodes that are activated by a soft-fork), or the txs
    should contain an actual signature, the raw modes RAW_OP_TRUE and RAW_P2PK
    can be useful. Summary of modes:

                    |      output      |         |  tx is   | can modify | needs
        mode        |   description    | address | standard | scriptSig  | signing
    ----------------+------------------+---------+----------+------------+---------
    ADDRESS_OP_TRUE | anyone-can-spend | bech32m |   yes    |     no     |   no
    RAW_OP_TRUE     | anyone-can-spend | - (raw) |   no     |    yes     |   no
    RAW_P2PK        | pay-to-public-key| - (raw) |   yes    |    yes     |   yes
    """
    ADDRESS_OP_TRUE = 1  # default: fixed P2TR address, OP_TRUE witness
    RAW_OP_TRUE = 2      # raw OP_TRUE scriptPubKey (produces non-standard txs)
    RAW_P2PK = 3         # raw pay-to-public-key; spends require ECDSA signing
class MiniWallet:
    """Minimal test wallet.

    Tracks its own UTXO set in ``self._utxos`` and builds/spends raw
    transactions according to the configured MiniWalletMode, without needing
    a compiled wallet on the node.
    """

    def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE):
        self._test_node = test_node
        self._utxos = []
        self._priv_key = None
        self._address = None

        assert isinstance(mode, MiniWalletMode)
        if mode == MiniWalletMode.RAW_OP_TRUE:
            self._scriptPubKey = bytes(CScript([OP_TRUE]))
        elif mode == MiniWalletMode.RAW_P2PK:
            # use simple deterministic private key (k=1)
            self._priv_key = ECKey()
            self._priv_key.set((1).to_bytes(32, 'big'), True)
            pub_key = self._priv_key.get_pubkey()
            self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes())
        elif mode == MiniWalletMode.ADDRESS_OP_TRUE:
            self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true()
            self._scriptPubKey = bytes.fromhex(self._test_node.validateaddress(self._address)['scriptPubKey'])

    def rescan_utxos(self):
        """Drop all utxos and rescan the utxo set"""
        self._utxos = []
        res = self._test_node.scantxoutset(action="start", scanobjects=[self.get_descriptor()])
        assert_equal(True, res['success'])
        for utxo in res['unspents']:
            self._utxos.append({'txid': utxo['txid'], 'vout': utxo['vout'], 'value': utxo['amount'], 'height': utxo['height']})

    def scan_tx(self, tx):
        """Scan the tx for self._scriptPubKey outputs and add them to self._utxos"""
        for out in tx['vout']:
            if out['scriptPubKey']['hex'] == self._scriptPubKey.hex():
                # height 0: not yet confirmed in a block
                self._utxos.append({'txid': tx['txid'], 'vout': out['n'], 'value': out['value'], 'height': 0})

    def sign_tx(self, tx, fixed_length=True):
        """Sign tx that has been created by MiniWallet in P2PK mode"""
        assert self._priv_key is not None
        (sighash, err) = LegacySignatureHash(CScript(self._scriptPubKey), tx, 0, SIGHASH_ALL)
        assert err is None
        # for exact fee calculation, create only signatures with fixed size by default (>49.89% probability):
        # 65 bytes: high-R val (33 bytes) + low-S val (32 bytes)
        # with the DER header/skeleton data of 6 bytes added, this leads to a target size of 71 bytes
        der_sig = b''
        while not len(der_sig) == 71:
            der_sig = self._priv_key.sign_ecdsa(sighash)
            if not fixed_length:
                break
        tx.vin[0].scriptSig = CScript([der_sig + bytes(bytearray([SIGHASH_ALL]))])

    def generate(self, num_blocks, **kwargs):
        """Generate blocks with coinbase outputs to the internal address, and append the outputs to the internal list"""
        blocks = self._test_node.generatetodescriptor(num_blocks, self.get_descriptor(), **kwargs)
        for b in blocks:
            block_info = self._test_node.getblock(blockhash=b, verbosity=2)
            cb_tx = block_info['tx'][0]  # coinbase is always tx 0
            self._utxos.append({'txid': cb_tx['txid'], 'vout': 0, 'value': cb_tx['vout'][0]['value'], 'height': block_info['height']})
        return blocks

    def get_descriptor(self):
        # Raw-script descriptor (with checksum) matching our scriptPubKey.
        return descsum_create(f'raw({self._scriptPubKey.hex()})')

    def get_address(self):
        # None unless the wallet was created in ADDRESS_OP_TRUE mode.
        return self._address

    def get_utxo(self, *, txid: Optional[str]='', mark_as_spent=True):
        """
        Returns a utxo and marks it as spent (pops it from the internal list)

        Args:
            txid: get the first utxo we find from a specific transaction
        """
        index = -1  # by default the last utxo
        self._utxos = sorted(self._utxos, key=lambda k: (k['value'], -k['height']))  # Put the largest utxo last
        if txid:
            utxo = next(filter(lambda utxo: txid == utxo['txid'], self._utxos))
            index = self._utxos.index(utxo)
        if mark_as_spent:
            return self._utxos.pop(index)
        else:
            return self._utxos[index]

    def send_self_transfer(self, **kwargs):
        """Create and send a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed."""
        tx = self.create_self_transfer(**kwargs)
        self.sendrawtransaction(from_node=kwargs['from_node'], tx_hex=tx['hex'])
        return tx

    def send_to(self, *, from_node, scriptPubKey, amount, fee=1000):
        """
        Create and send a tx with an output to a given scriptPubKey/amount,
        plus a change output to our internal address. To keep things simple, a
        fixed fee given in Satoshi is used.

        Note that this method fails if there is no single internal utxo
        available that can cover the cost for the amount and the fixed fee
        (the utxo with the largest value is taken).

        Returns a tuple (txid, n) referring to the created external utxo outpoint.
        """
        tx = self.create_self_transfer(from_node=from_node, fee_rate=0, mempool_valid=False)['tx']
        assert_greater_than_or_equal(tx.vout[0].nValue, amount + fee)
        tx.vout[0].nValue -= (amount + fee)  # change output -> MiniWallet
        tx.vout.append(CTxOut(amount, scriptPubKey))  # arbitrary output -> to be returned
        txid = self.sendrawtransaction(from_node=from_node, tx_hex=tx.serialize().hex())
        return txid, 1

    def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node, utxo_to_spend=None, mempool_valid=True, locktime=0, sequence=0):
        """Create and return a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed."""
        utxo_to_spend = utxo_to_spend or self.get_utxo()
        if self._priv_key is None:
            vsize = Decimal(104)  # anyone-can-spend
        else:
            vsize = Decimal(168)  # P2PK (73 bytes scriptSig + 35 bytes scriptPubKey + 60 bytes other)
        send_value = int(COIN * (utxo_to_spend['value'] - fee_rate * (vsize / 1000)))
        assert send_value > 0

        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']), nSequence=sequence)]
        tx.vout = [CTxOut(send_value, self._scriptPubKey)]
        tx.nLockTime = locktime
        if not self._address:
            # raw script
            if self._priv_key is not None:
                # P2PK, need to sign
                self.sign_tx(tx)
            else:
                # anyone-can-spend
                tx.vin[0].scriptSig = CScript([OP_NOP] * 43)  # pad to identical size
        else:
            tx.wit.vtxinwit = [CTxInWitness()]
            tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), bytes([LEAF_VERSION_TAPSCRIPT]) + self._internal_key]
        tx_hex = tx.serialize().hex()

        tx_info = from_node.testmempoolaccept([tx_hex])[0]
        assert_equal(mempool_valid, tx_info['allowed'])
        if mempool_valid:
            # sanity check: exact vsize/fee so that fee_rate is honored precisely
            assert_equal(tx_info['vsize'], vsize)
            assert_equal(tx_info['fees']['base'], utxo_to_spend['value'] - Decimal(send_value) / COIN)
        return {'txid': tx_info['txid'], 'wtxid': tx_info['wtxid'], 'hex': tx_hex, 'tx': tx}

    def sendrawtransaction(self, *, from_node, tx_hex):
        # Broadcast, then register any outputs paying back to this wallet.
        txid = from_node.sendrawtransaction(tx_hex)
        self.scan_tx(from_node.decoderawtransaction(tx_hex))
        return txid
def random_p2wpkh():
    """Return a P2WPKH scriptPubKey for a freshly generated random key.

    Useful whenever a throwaway destination is needed but no compiled wallet
    is available (e.g. as a replacement for the getnewaddress RPC).
    """
    privkey = ECKey()
    privkey.generate()
    pubkey_bytes = privkey.get_pubkey().get_bytes()
    return key_to_p2wpkh_script(pubkey_bytes)
def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, parent_locking_script=None, fee=DEFAULT_FEE):
    """Build a transaction that spends parent_txid.vout[n] and produces one output with
    amount = parent_value with a fee deducted.
    Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created).
    """
    inputs = [{"txid": parent_txid, "vout": n}]
    my_value = parent_value - fee
    outputs = {address : my_value}
    rawtx = node.createrawtransaction(inputs, outputs)
    # prevtxs is only needed when the parent output isn't known to the node
    # (e.g. unconfirmed and not in its mempool); otherwise pass None and let
    # signrawtransactionwithkey look it up itself.
    prevtxs = [{
        "txid": parent_txid,
        "vout": n,
        "scriptPubKey": parent_locking_script,
        "amount": parent_value,
    }] if parent_locking_script else None
    signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs)
    assert signedtx["complete"]
    tx = tx_from_hex(signedtx["hex"])
    return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
def create_child_with_parents(node, address, privkeys, parents_tx, values, locking_scripts, fee=DEFAULT_FEE):
    """Build and sign a child transaction spending output 0 of every parent.

    All parent values are merged into a single output to `address`, minus a
    fixed fee. Returns the signed raw hex of the child transaction.
    """
    inputs = [{"txid": parent.rehash(), "vout": 0} for parent in parents_tx]
    outputs = {address: sum(values) - fee}
    raw_child = node.createrawtransaction(inputs, outputs)
    # signrawtransactionwithkey needs the locking script and amount of each
    # parent output it is asked to spend.
    prevtxs = [
        {"txid": parents_tx[i].rehash(), "vout": 0, "scriptPubKey": locking_scripts[i], "amount": values[i]}
        for i in range(len(parents_tx))
    ]
    signed_child = node.signrawtransactionwithkey(hexstring=raw_child, privkeys=privkeys, prevtxs=prevtxs)
    assert signed_child["complete"]
    return signed_child["hex"]
def create_raw_chain(node, first_coin, address, privkeys, chain_length=25):
    """Create a linear chain of `chain_length` transactions.

    Each transaction spends output 0 of its predecessor; the first one spends
    `first_coin`. Returns a tuple (list of raw hex strings, list of
    CTransaction objects), ordered parent-first.
    """
    chain_hex = []
    chain_txns = []
    prev_txid = first_coin["txid"]
    prev_value = first_coin["amount"]
    prev_locking_script = None

    for _ in range(chain_length):
        tx, txhex, prev_value, prev_locking_script = make_chain(
            node, address, privkeys, prev_txid, prev_value, 0, prev_locking_script)
        prev_txid = tx.rehash()
        chain_hex.append(txhex)
        chain_txns.append(tx)

    return (chain_hex, chain_txns)
def bulk_transaction(tx, node, target_weight, privkeys, prevtxs=None):
    """Pad a transaction with extra outputs until it reaches a target weight (or higher).

    The input tx is not modified; a deep copy is padded with zero-value
    OP_RETURN outputs of 512 random bytes each. If `privkeys` is given, the
    padded tx is re-signed via the node; otherwise an OP_TRUE witness is set.
    Returns a CTransaction object.
    """
    tx_heavy = deepcopy(tx)
    assert_greater_than_or_equal(target_weight, tx_heavy.get_weight())
    while tx_heavy.get_weight() < target_weight:
        # OP_RETURN OP_PUSHDATA2 512 bytes, followed by 512 random bytes.
        # Built with str.join instead of the previous char-by-char `+=` loop
        # (same number of choice() calls, no quadratic string rebuilding).
        random_spk = "6a4d0200" + "".join(choice("0123456789ABCDEF") for _ in range(512 * 2))
        tx_heavy.vout.append(CTxOut(0, bytes.fromhex(random_spk)))
    # Re-sign the transaction
    if privkeys:
        signed = node.signrawtransactionwithkey(tx_heavy.serialize().hex(), privkeys, prevtxs)
        return tx_from_hex(signed["hex"])
    # OP_TRUE
    tx_heavy.wit.vtxinwit = [CTxInWitness()]
    tx_heavy.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
    return tx_heavy
| 45.323024 | 140 | 0.656759 |
from copy import deepcopy
from decimal import Decimal
from enum import Enum
from random import choice
from typing import Optional
from test_framework.address import create_deterministic_address_bcrt1_p2tr_op_true
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
tx_from_hex,
)
from test_framework.script import (
CScript,
LegacySignatureHash,
LEAF_VERSION_TAPSCRIPT,
OP_NOP,
OP_TRUE,
SIGHASH_ALL,
)
from test_framework.script_util import (
key_to_p2pk_script,
key_to_p2wpkh_script,
)
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
DEFAULT_FEE = Decimal("0.0001")
class MiniWalletMode(Enum):
ADDRESS_OP_TRUE = 1
RAW_OP_TRUE = 2
RAW_P2PK = 3
class MiniWallet:
def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE):
self._test_node = test_node
self._utxos = []
self._priv_key = None
self._address = None
assert isinstance(mode, MiniWalletMode)
if mode == MiniWalletMode.RAW_OP_TRUE:
self._scriptPubKey = bytes(CScript([OP_TRUE]))
elif mode == MiniWalletMode.RAW_P2PK:
self._priv_key = ECKey()
self._priv_key.set((1).to_bytes(32, 'big'), True)
pub_key = self._priv_key.get_pubkey()
self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes())
elif mode == MiniWalletMode.ADDRESS_OP_TRUE:
self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true()
self._scriptPubKey = bytes.fromhex(self._test_node.validateaddress(self._address)['scriptPubKey'])
def rescan_utxos(self):
self._utxos = []
res = self._test_node.scantxoutset(action="start", scanobjects=[self.get_descriptor()])
assert_equal(True, res['success'])
for utxo in res['unspents']:
self._utxos.append({'txid': utxo['txid'], 'vout': utxo['vout'], 'value': utxo['amount'], 'height': utxo['height']})
def scan_tx(self, tx):
for out in tx['vout']:
if out['scriptPubKey']['hex'] == self._scriptPubKey.hex():
self._utxos.append({'txid': tx['txid'], 'vout': out['n'], 'value': out['value'], 'height': 0})
def sign_tx(self, tx, fixed_length=True):
assert self._priv_key is not None
(sighash, err) = LegacySignatureHash(CScript(self._scriptPubKey), tx, 0, SIGHASH_ALL)
assert err is None
der_sig = b''
while not len(der_sig) == 71:
der_sig = self._priv_key.sign_ecdsa(sighash)
if not fixed_length:
break
tx.vin[0].scriptSig = CScript([der_sig + bytes(bytearray([SIGHASH_ALL]))])
def generate(self, num_blocks, **kwargs):
blocks = self._test_node.generatetodescriptor(num_blocks, self.get_descriptor(), **kwargs)
for b in blocks:
block_info = self._test_node.getblock(blockhash=b, verbosity=2)
cb_tx = block_info['tx'][0]
self._utxos.append({'txid': cb_tx['txid'], 'vout': 0, 'value': cb_tx['vout'][0]['value'], 'height': block_info['height']})
return blocks
def get_descriptor(self):
return descsum_create(f'raw({self._scriptPubKey.hex()})')
def get_address(self):
return self._address
def get_utxo(self, *, txid: Optional[str]='', mark_as_spent=True):
index = -1
self._utxos = sorted(self._utxos, key=lambda k: (k['value'], -k['height']))
if txid:
utxo = next(filter(lambda utxo: txid == utxo['txid'], self._utxos))
index = self._utxos.index(utxo)
if mark_as_spent:
return self._utxos.pop(index)
else:
return self._utxos[index]
def send_self_transfer(self, **kwargs):
tx = self.create_self_transfer(**kwargs)
self.sendrawtransaction(from_node=kwargs['from_node'], tx_hex=tx['hex'])
return tx
def send_to(self, *, from_node, scriptPubKey, amount, fee=1000):
tx = self.create_self_transfer(from_node=from_node, fee_rate=0, mempool_valid=False)['tx']
assert_greater_than_or_equal(tx.vout[0].nValue, amount + fee)
tx.vout[0].nValue -= (amount + fee)
tx.vout.append(CTxOut(amount, scriptPubKey))
txid = self.sendrawtransaction(from_node=from_node, tx_hex=tx.serialize().hex())
return txid, 1
def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node, utxo_to_spend=None, mempool_valid=True, locktime=0, sequence=0):
utxo_to_spend = utxo_to_spend or self.get_utxo()
if self._priv_key is None:
vsize = Decimal(104)
else:
vsize = Decimal(168)
send_value = int(COIN * (utxo_to_spend['value'] - fee_rate * (vsize / 1000)))
assert send_value > 0
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']), nSequence=sequence)]
tx.vout = [CTxOut(send_value, self._scriptPubKey)]
tx.nLockTime = locktime
if not self._address:
if self._priv_key is not None:
self.sign_tx(tx)
else:
tx.vin[0].scriptSig = CScript([OP_NOP] * 43)
else:
tx.wit.vtxinwit = [CTxInWitness()]
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), bytes([LEAF_VERSION_TAPSCRIPT]) + self._internal_key]
tx_hex = tx.serialize().hex()
tx_info = from_node.testmempoolaccept([tx_hex])[0]
assert_equal(mempool_valid, tx_info['allowed'])
if mempool_valid:
assert_equal(tx_info['vsize'], vsize)
assert_equal(tx_info['fees']['base'], utxo_to_spend['value'] - Decimal(send_value) / COIN)
return {'txid': tx_info['txid'], 'wtxid': tx_info['wtxid'], 'hex': tx_hex, 'tx': tx}
def sendrawtransaction(self, *, from_node, tx_hex):
txid = from_node.sendrawtransaction(tx_hex)
self.scan_tx(from_node.decoderawtransaction(tx_hex))
return txid
def random_p2wpkh():
key = ECKey()
key.generate()
return key_to_p2wpkh_script(key.get_pubkey().get_bytes())
def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, parent_locking_script=None, fee=DEFAULT_FEE):
inputs = [{"txid": parent_txid, "vout": n}]
my_value = parent_value - fee
outputs = {address : my_value}
rawtx = node.createrawtransaction(inputs, outputs)
prevtxs = [{
"txid": parent_txid,
"vout": n,
"scriptPubKey": parent_locking_script,
"amount": parent_value,
}] if parent_locking_script else None
signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs)
assert signedtx["complete"]
tx = tx_from_hex(signedtx["hex"])
return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
def create_child_with_parents(node, address, privkeys, parents_tx, values, locking_scripts, fee=DEFAULT_FEE):
num_parents = len(parents_tx)
total_value = sum(values)
inputs = [{"txid": tx.rehash(), "vout": 0} for tx in parents_tx]
outputs = {address : total_value - fee}
rawtx_child = node.createrawtransaction(inputs, outputs)
prevtxs = []
for i in range(num_parents):
prevtxs.append({"txid": parents_tx[i].rehash(), "vout": 0, "scriptPubKey": locking_scripts[i], "amount": values[i]})
signedtx_child = node.signrawtransactionwithkey(hexstring=rawtx_child, privkeys=privkeys, prevtxs=prevtxs)
assert signedtx_child["complete"]
return signedtx_child["hex"]
def create_raw_chain(node, first_coin, address, privkeys, chain_length=25):
parent_locking_script = None
txid = first_coin["txid"]
chain_hex = []
chain_txns = []
value = first_coin["amount"]
for _ in range(chain_length):
(tx, txhex, value, parent_locking_script) = make_chain(node, address, privkeys, txid, value, 0, parent_locking_script)
txid = tx.rehash()
chain_hex.append(txhex)
chain_txns.append(tx)
return (chain_hex, chain_txns)
def bulk_transaction(tx, node, target_weight, privkeys, prevtxs=None):
tx_heavy = deepcopy(tx)
assert_greater_than_or_equal(target_weight, tx_heavy.get_weight())
while tx_heavy.get_weight() < target_weight:
random_spk = "6a4d0200"
for _ in range(512*2):
random_spk += choice("0123456789ABCDEF")
tx_heavy.vout.append(CTxOut(0, bytes.fromhex(random_spk)))
if privkeys:
signed = node.signrawtransactionwithkey(tx_heavy.serialize().hex(), privkeys, prevtxs)
return tx_from_hex(signed["hex"])
tx_heavy.wit.vtxinwit = [CTxInWitness()]
tx_heavy.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
return tx_heavy
| true | true |
f724cb595d2162e3e332ee66f877f324cfe44a48 | 16,524 | py | Python | modules/routes/user/forms.py | manu-p-1/meet | 5e6865a9b5035e324ab0b7cf5a9a71383dfcac9d | [
"MIT"
] | 3 | 2020-08-27T20:15:52.000Z | 2022-02-19T12:05:11.000Z | modules/routes/user/forms.py | manu-p-1/meet | 5e6865a9b5035e324ab0b7cf5a9a71383dfcac9d | [
"MIT"
] | null | null | null | modules/routes/user/forms.py | manu-p-1/meet | 5e6865a9b5035e324ab0b7cf5a9a71383dfcac9d | [
"MIT"
] | 2 | 2020-09-26T00:37:46.000Z | 2021-09-23T03:45:00.000Z | import sys
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, TextAreaField, SelectField, BooleanField, DecimalField, HiddenField, \
RadioField, FieldList
from wtforms.validators import InputRequired, NumberRange, Length, AnyOf
from wtforms.widgets.html5 import NumberInput
from modules.routes.user.custom_fields import EmployeeInfoTextAreaField
from modules.routes.user.custom_validators import RequiredIf, EmployeeUnique, EndDateProper, \
StartDateProper, RequiredIfRadioField, VelocityUsageLimit, DeptBalance
from modules.routes.utils.custom_fields import InlineSubmitField
from server import client
# (code, label) choices for the fund-destination <select>; the empty first
# entry acts as a "please choose" placeholder that fails InputRequired.
DEPT_MAPPINGS = [
    ('', 'Please Choose a fund Destination'),
    ('IT', 'IT'), ('AC', 'ACCOUNTING'), ('MK', 'MARKETING'), ('HR', 'HUMAN RESOURCES'),
    ('PD', 'PRODUCTION'), ('RD', 'RESEARCH & DEVELOPMENT'), ('SC', 'SECURITY'), ('LG', 'LOGISTICS')
]
def create_plan_form(sn):
    """Build a plan-creation form for the given session.

    :param sn: Session dictionary for the logged-in manager
    :return: instance of the base plan form extended with a "Create Plan" button
    """
    base_form = get_plan_base(sn)

    class CreatePlanForm(base_form):
        create_plan_btn = InlineSubmitField(
            "Create Plan",
            btn_text="Create Plan",
            render_kw={"class": "btn btn-primary btn-block"},
        )

    return CreatePlanForm()
def get_plan_form(sn: dict):
    """Build a plan-management form for the given session.

    :param sn: Session dictionary for the logged-in manager
    :return: instance of the base plan form extended with an "Update Plan" button
    """
    base_form = get_plan_base(sn)

    class ManagePlanForm(base_form):
        update_plan_btn = InlineSubmitField(
            "Update Plan",
            btn_text="Update Plan",
            render_kw={"class": "btn btn-primary btn-block"},
        )

    return ManagePlanForm()
def get_plan_base(sn: dict):
    """Build and return a reference to the base Plan form class (not an instance).

    :param sn: Session dictionary; must contain 'manager_dept'
    :return: The Plan FlaskForm subclass, configured for the given session
    """

    class Plan(FlaskForm):
        # Disbursement-mode constants used by the disbursement_type radio field.
        DISB_ALL = "DISB_ALL"
        DISB_INDIV = "DISB_INDIV"

        plan_name = StringField("Plan Name",
                                validators=[
                                    InputRequired(message="Enter a plan name."),
                                    Length(min=2, max=255, message="Plan name was too short or too long")
                                ],
                                render_kw={"placeholder": "Plan Name",
                                           "class": "form-control"})

        # NOTE(review): MINIMUM_FUND_AMT / MINIMUM_CONTROL_AMT are not defined in
        # this excerpt -- presumably module-level constants; confirm they exist.
        funding_amount = DecimalField('Per-Employee Funding Amount',
                                      validators=[
                                          InputRequired(message="Enter a funding amount."),
                                          NumberRange(min=MINIMUM_FUND_AMT,
                                                      message=f"The minimum funding amount must be at "
                                                              f"least ${MINIMUM_FUND_AMT}."),
                                          DeptBalance(client=client, sn=sn)
                                      ],
                                      render_kw={"placeholder": "Funding Amount",
                                                 "class": "form-control"},
                                      widget=NumberInput())

        plan_justification = StringField('Plan Justification (e.g. Travel, Equipment, Party)',
                                         validators=[
                                             InputRequired(message="A plan justification is required."),
                                             Length(min=3, max=50,
                                                    message="Plan justification was either too short or too long.")
                                         ],
                                         render_kw={"placeholder": "Plan Justification",
                                                    "class": "form-control"})

        memo = TextAreaField('Memo (min 10 chars, max 255 chars.)',
                             validators=[
                                 InputRequired("A memo is required."),
                                 Length(min=10, max=255, message="Memo was either too short or too long.")
                             ],
                             render_kw={"rows": 4,
                                        "maxlength": 255,
                                        "placeholder": "Memo Description",
                                        "class": "form-control"})

        start_date = StringField('Start Date/Times',
                                 validators=[
                                     InputRequired(message="A start date is required."),
                                     StartDateProper()
                                 ],
                                 render_kw={"placeholder": "Start Date/Times",
                                            "class": "form-control"})

        # The source department is fixed to the logged-in manager's department.
        source_fund = SelectField('Fund Source',
                                  validators=[InputRequired(message="A funding source department is required.")],
                                  choices=[
                                      (
                                          sn['manager_dept'],
                                          client.READABLE_DEPARTMENTS[sn['manager_dept']]
                                      )
                                  ],
                                  render_kw={"class": "form-control"})

        dest_fund = SelectField('Fund Destination',
                                validators=[InputRequired(message="A funding destination department is required.")],
                                choices=DEPT_MAPPINGS,
                                render_kw={"class": "form-control"})

        has_fund_individuals = BooleanField('Employee specific disbursement',
                                            render_kw={"class": "custom-control-input"})

        # BUG FIX: the implicitly-concatenated message strings were missing a
        # separating space and rendered as "...or disburseto all employees".
        disbursement_type = RadioField('Employee Disbursement Type', choices=[
            (DISB_ALL, 'Disburse to all Employees'),
            (DISB_INDIV, 'Search for an Employee'),
        ], default=DISB_ALL, validators=[RequiredIf('has_fund_individuals',
                                                    message="To disburse funds, search for an employee or disburse "
                                                            "to all employees")])

        employees_list = FieldList(EmployeeInfoTextAreaField('employees_list',
                                                             validators=[
                                                                 RequiredIfRadioField(
                                                                     'disbursement_type',
                                                                     DISB_INDIV,
                                                                     message="Please specify at "
                                                                             "least 1 employee to "
                                                                             "disburse funds to.")
                                                             ]),
                                   validators=[EmployeeUnique(object_name="employee id's")],
                                   min_entries=1,
                                   max_entries=24)

        has_end_date = BooleanField('Add End Date',
                                    render_kw={"class": "custom-control-input"})

        # BUG FIX: placeholder read "Date Date/Times"; it now mirrors the label.
        end_date = StringField('End Date/Times',
                               validators=[
                                   RequiredIf("has_end_date", message="The end date is required."),
                                   EndDateProper(),
                               ],
                               render_kw={"placeholder": "End Date/Times",
                                          "class": "form-control"})

        has_velocity_controls = BooleanField('Add Velocity Controls',
                                             render_kw={"class": "custom-control-input"})

        vel_control_name = StringField('Control Name',
                                       validators=[
                                           RequiredIf('has_velocity_controls',
                                                      message="The velocity control, control name is required."),
                                           Length(max=50)
                                       ],
                                       render_kw={"class": "form-control",
                                                  "placeholder": "Enter a Control Name"})

        vel_control_window = SelectField('Control Window',
                                         validators=[
                                             RequiredIf('has_velocity_controls',
                                                        message="The velocity control, control window is required."),
                                             Length(max=30)
                                         ],
                                         choices=[
                                             ('', 'Select a Control Time Period'),
                                             ('day', 'DAY'),
                                             ('week', 'WEEK'),
                                             ('month', 'MONTH'),
                                             ('lifetime', 'LIFETIME'),
                                             ('transaction', 'TRANSACTION')
                                         ],
                                         render_kw={"class": "form-control"})

        # NOTE(review): VelocityUsageLimit() validating the *amount* field looks
        # like a deliberate cross-field check -- confirm against the validator.
        vel_amt_limit = DecimalField('Amount Limit',
                                     validators=[
                                         RequiredIf('has_velocity_controls',
                                                    message="The velocity control amount limit is required."),
                                         NumberRange(min=MINIMUM_CONTROL_AMT,
                                                     message=f"The minimum velocity control amount limit must be at "
                                                             f"least ${MINIMUM_CONTROL_AMT}."),
                                         VelocityUsageLimit()
                                     ],
                                     render_kw={"placeholder": "Amount Limit",
                                                "class": "form-control"},
                                     widget=NumberInput())

        vel_usage_limit = IntegerField('Usage Limit (0 - 100)',
                                       validators=[
                                           RequiredIf('has_velocity_controls',
                                                      message="The velocity control usage limit is required."),
                                           NumberRange(min=0, max=100,
                                                       message="The velocity control usage limit should be between "
                                                               "0 and 100, inclusive.")
                                       ],
                                       render_kw={"placeholder": "Usage Limit",
                                                  "class": "form-control"},
                                       widget=NumberInput())

        # Populated client-side (browser timezone); validated server-side.
        time_zone = HiddenField(validators=[InputRequired(message="The timezone is a required field")])

        priority = HiddenField(validators=[
            InputRequired(message="Priority is a required field"),
            AnyOf(values=["Low", "Medium", "High", "Urgent"], message="Priority must be Low, Medium, High, or Urgent")
        ], default="Low")

    return Plan  # Return a reference to the class and not an object!
class Forminator:
    """Normalizing wrapper around a submitted plan form.

    Captures the raw WTForms field data, "scrubs" the free-text and date
    fields (whitespace collapse, capitalization, strip), and exposes the
    cleaned values through read-only properties.  Optional fields are
    surfaced as ``None`` when empty so callers can use simple truthiness
    checks.
    """

    def __init__(self, form):
        """Snapshot field data from ``form`` and immediately clean it.

        :param form: a validated plan form (see ``get_plan_base``); each
            field is read via its ``.data`` attribute except
            ``employees_list``, where the field object itself is kept.
        """
        # These will always be required
        self._form = form
        self._plan_name: str = form.plan_name.data
        self._funding_amount: str = form.funding_amount.data
        self._plan_justification = form.plan_justification.data
        self._memo = form.memo.data
        self._start_date = form.start_date.data
        self._source_fund = form.source_fund.data
        self._dest_fund = form.dest_fund.data
        # Here on out is optional
        self._has_fund_individuals = form.has_fund_individuals.data
        self._disbursement_type = form.disbursement_type.data
        # We only want the field list here NOT the data (the setter below
        # mutates the FieldList in place).
        self._employees_list = form.employees_list
        self._has_end_date = form.has_end_date.data
        self._end_date = form.end_date.data
        self._has_velocity_controls = form.has_velocity_controls.data
        self._vel_control_name = form.vel_control_name.data
        self._vel_control_window = form.vel_control_window.data
        self._vel_amt_limit = form.vel_amt_limit.data
        self._vel_usage_limit = form.vel_usage_limit.data
        self._time_zone = form.time_zone.data
        self._priority = form.priority.data
        self.clean()

    def clean(self):
        """Normalize all free-text and date fields in place."""
        self._scrub_plan_name()
        self._scrub_plan_justification()
        self._scrub_memo()
        self._scrub_dates()
        # strings are truthy -- skip the optional control name when blank
        if self._vel_control_name:
            self._scrub_vel_control_name()

    def _scrub_plan_name(self):
        self._plan_name = self.scrub_plan_name(self._plan_name)

    def _scrub_plan_justification(self):
        self._plan_justification = self.scrub_plan_name(self._plan_justification)  # We just use the same filter

    def _scrub_memo(self):
        self._memo = self.scrub_plan_name(self._memo)  # We just use the same filter

    def _scrub_dates(self):
        self._start_date = self.scrub_date(self._start_date)
        if self._end_date:
            self._end_date = self.scrub_date(self._end_date)

    def _scrub_vel_control_name(self):
        self._vel_control_name = self.scrub_plan_name(self._vel_control_name)

    @staticmethod
    def scrub_date(date):
        """Trim surrounding whitespace from a date string."""
        return date.strip()

    @staticmethod
    def scrub_plan_name(name):
        """Collapse internal whitespace and capitalize the first letter."""
        return " ".join(name.split()).capitalize()

    def is_disbursed_all(self):
        """Return True when funds are disbursed to all employees.

        ``has_fund_individuals`` may be None (unchecked BooleanField), in
        which case the short-circuited ``and`` yields None and we coerce
        it to False.
        """
        x = self.has_fund_individuals and self.disbursement_type == self._form.DISB_ALL
        if x is None:
            return False
        return x

    def retrieve(self):
        """Return self (fluent accessor used by callers)."""
        return self

    @property
    def plan_name(self):
        return self._plan_name

    @property
    def funding_amount(self):
        return self._funding_amount

    @property
    def plan_justification(self):
        return self._plan_justification

    @property
    def memo(self):
        return self._memo

    @property
    def start_date(self):
        return self._start_date

    @property
    def source_fund(self):
        return self._source_fund

    @property
    def dest_fund(self):
        return self._dest_fund

    @property
    def has_fund_individuals(self):
        return self._has_fund_individuals

    @property
    def disbursement_type(self):
        return self._disbursement_type if self._disbursement_type else None

    @property
    def employees_list(self):
        """Yield each employee field entry, or a single empty list if none."""
        e_list = self._employees_list.data
        if len(e_list) != 0 and e_list[0] != '':
            for employee_field in e_list:
                yield employee_field
        else:
            yield []

    @employees_list.setter
    def employees_list(self, e_list):
        self._employees_list.pop_entry()  # Remove the default entry
        # FIX: was a list comprehension used purely for side effects;
        # a plain loop states the intent (mutating the FieldList).
        for entry in e_list:
            self._employees_list.append_entry(entry)

    @property
    def has_end_date(self):
        return self._has_end_date if self._has_end_date else None

    @property
    def end_date(self):
        return self._end_date if self._end_date else None

    @property
    def has_velocity_controls(self):
        return self._has_velocity_controls if self._has_velocity_controls else None

    @property
    def vel_control_name(self):
        return self._vel_control_name if self._vel_control_name else None

    @property
    def vel_control_window(self):
        return self._vel_control_window if self._vel_control_window else None

    @property
    def vel_amt_limit(self):
        return self._vel_amt_limit if self._vel_amt_limit else None

    @property
    def vel_usage_limit(self):
        return self._vel_usage_limit if self._vel_usage_limit else None

    @property
    def raw_form(self):
        return self._form

    @property
    def time_zone(self):
        return self._time_zone

    @property
    def priority(self):
        return self._priority
| 42.26087 | 118 | 0.489893 | import sys
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, TextAreaField, SelectField, BooleanField, DecimalField, HiddenField, \
RadioField, FieldList
from wtforms.validators import InputRequired, NumberRange, Length, AnyOf
from wtforms.widgets.html5 import NumberInput
from modules.routes.user.custom_fields import EmployeeInfoTextAreaField
from modules.routes.user.custom_validators import RequiredIf, EmployeeUnique, EndDateProper, \
StartDateProper, RequiredIfRadioField, VelocityUsageLimit, DeptBalance
from modules.routes.utils.custom_fields import InlineSubmitField
from server import client
DEPT_MAPPINGS = [
('', 'Please Choose a fund Destination'),
('IT', 'IT'), ('AC', 'ACCOUNTING'), ('MK', 'MARKETING'), ('HR', 'HUMAN RESOURCES'),
('PD', 'PRODUCTION'), ('RD', 'RESEARCH & DEVELOPMENT'), ('SC', 'SECURITY'), ('LG', 'LOGISTICS')
]
def create_plan_form(sn):
class CreatePlanForm(get_plan_base(sn)):
create_plan_btn = InlineSubmitField("Create Plan", btn_text="Create Plan",
render_kw={"class": "btn btn-primary btn-block"})
return CreatePlanForm()
def get_plan_form(sn: dict):
class ManagePlanForm(get_plan_base(sn)):
update_plan_btn = InlineSubmitField("Update Plan", btn_text="Update Plan",
render_kw={"class": "btn btn-primary btn-block"})
return ManagePlanForm()
def get_plan_base(sn: dict):
class Plan(FlaskForm):
DISB_ALL = "DISB_ALL"
DISB_INDIV = "DISB_INDIV"
plan_name = StringField("Plan Name",
validators=[
InputRequired(message="Enter a plan name."),
Length(min=2, max=255, message="Plan name was too short or too long")
],
render_kw={"placeholder": "Plan Name",
"class": "form-control"})
funding_amount = DecimalField('Per-Employee Funding Amount',
validators=[
InputRequired(message="Enter a funding amount."),
NumberRange(min=MINIMUM_FUND_AMT,
message=f"The minimum funding amount must be at "
f"least ${MINIMUM_FUND_AMT}."),
DeptBalance(client=client, sn=sn)
],
render_kw={"placeholder": "Funding Amount",
"class": "form-control"},
widget=NumberInput())
plan_justification = StringField('Plan Justification (e.g. Travel, Equipment, Party)',
validators=[
InputRequired(message="A plan justification is required."),
Length(min=3, max=50,
message="Plan justification was either too short or too long.")
],
render_kw={"placeholder": "Plan Justification",
"class": "form-control"})
memo = TextAreaField('Memo (min 10 chars, max 255 chars.)',
validators=[
InputRequired("A memo is required."),
Length(min=10, max=255, message="Memo was either too short or too long.")
],
render_kw={"rows": 4,
"maxlength": 255,
"placeholder": "Memo Description",
"class": "form-control"})
start_date = StringField('Start Date/Times',
validators=[
InputRequired(message="A start date is required."),
StartDateProper()
],
render_kw={"placeholder": "Start Date/Times",
"class": "form-control"})
source_fund = SelectField('Fund Source',
validators=[InputRequired(message="A funding source department is required.")],
choices=[
(
sn['manager_dept'],
client.READABLE_DEPARTMENTS[sn['manager_dept']]
)
],
render_kw={"class": "form-control"})
dest_fund = SelectField('Fund Destination',
validators=[InputRequired(message="A funding destination department is required.")],
choices=DEPT_MAPPINGS,
render_kw={"class": "form-control"})
has_fund_individuals = BooleanField('Employee specific disbursement',
render_kw={"class": "custom-control-input"})
disbursement_type = RadioField('Employee Disbursement Type', choices=[
(DISB_ALL, 'Disburse to all Employees'),
(DISB_INDIV, 'Search for an Employee'),
], default=DISB_ALL, validators=[RequiredIf('has_fund_individuals',
message="To disburse funds, search for an employee or disburse"
"to all employees")])
employees_list = FieldList(EmployeeInfoTextAreaField('employees_list',
validators=[
RequiredIfRadioField(
'disbursement_type',
DISB_INDIV,
message="Please specify at "
"least 1 employee to "
"disburse funds to.")
]),
validators=[EmployeeUnique(object_name="employee id's")],
min_entries=1,
max_entries=24)
has_end_date = BooleanField('Add End Date',
render_kw={"class": "custom-control-input"})
end_date = StringField('End Date/Times',
validators=[
RequiredIf("has_end_date", message="The end date is required."),
EndDateProper(),
],
render_kw={"placeholder": "Date Date/Times",
"class": "form-control"})
has_velocity_controls = BooleanField('Add Velocity Controls',
render_kw={"class": "custom-control-input"})
vel_control_name = StringField('Control Name',
validators=[
RequiredIf('has_velocity_controls',
message="The velocity control, control name is required."),
Length(max=50)
],
render_kw={"class": "form-control",
"placeholder": "Enter a Control Name"})
vel_control_window = SelectField('Control Window',
validators=[
RequiredIf('has_velocity_controls',
message="The velocity control, control window is required."),
Length(max=30)
],
choices=[
('', 'Select a Control Time Period'),
('day', 'DAY'),
('week', 'WEEK'),
('month', 'MONTH'),
('lifetime', 'LIFETIME'),
('transaction', 'TRANSACTION')
],
render_kw={"class": "form-control"})
vel_amt_limit = DecimalField('Amount Limit',
validators=[
RequiredIf('has_velocity_controls',
message="The velocity control amount limit is required."),
NumberRange(min=MINIMUM_CONTROL_AMT,
message=f"The minimum velocity control amount limit must be at "
f"least ${MINIMUM_CONTROL_AMT}."),
VelocityUsageLimit()
],
render_kw={"placeholder": "Amount Limit",
"class": "form-control"},
widget=NumberInput())
vel_usage_limit = IntegerField('Usage Limit (0 - 100)',
validators=[
RequiredIf('has_velocity_controls',
message="The velocity control usage limit is required."),
NumberRange(min=0, max=100,
message="The velocity control usage limit should be between "
"0 and 100, inclusive.")
],
render_kw={"placeholder": "Usage Limit",
"class": "form-control"},
widget=NumberInput())
time_zone = HiddenField(validators=[InputRequired(message="The timezone is a required field")])
priority = HiddenField(validators=[
InputRequired(message="Priority is a required field"),
AnyOf(values=["Low", "Medium", "High", "Urgent"], message="Priority must be Low, Medium, High, or Urgent")
], default="Low")
return Plan # Return a reference to the class and not an object!
class Forminator:
def __init__(self, form):
# These will always be required
self._form = form
self._plan_name: str = form.plan_name.data
self._funding_amount: str = form.funding_amount.data
self._plan_justification = form.plan_justification.data
self._memo = form.memo.data
self._start_date = form.start_date.data
self._source_fund = form.source_fund.data
self._dest_fund = form.dest_fund.data
# Here on out is optional
self._has_fund_individuals = form.has_fund_individuals.data
self._disbursement_type = form.disbursement_type.data
# We only want the field list here NOT the data
self._employees_list = form.employees_list
self._has_end_date = form.has_end_date.data
self._end_date = form.end_date.data
self._has_velocity_controls = form.has_velocity_controls.data
self._vel_control_name = form.vel_control_name.data
self._vel_control_window = form.vel_control_window.data
self._vel_amt_limit = form.vel_amt_limit.data
self._vel_usage_limit = form.vel_usage_limit.data
self._time_zone = form.time_zone.data
self._priority = form.priority.data
self.clean()
def clean(self):
self._scrub_plan_name()
self._scrub_plan_justification()
self._scrub_memo()
self._scrub_dates()
# strings are truthy
if self._vel_control_name:
self._scrub_vel_control_name()
def _scrub_plan_name(self):
self._plan_name = self.scrub_plan_name(self._plan_name)
def _scrub_plan_justification(self):
self._plan_justification = self.scrub_plan_name(self._plan_justification) # We just use the same filter
def _scrub_memo(self):
self._memo = self.scrub_plan_name(self._memo) # We just use the same filter
def _scrub_dates(self):
self._start_date = self.scrub_date(self._start_date)
if self._end_date:
self._end_date = self.scrub_date(self._end_date)
def _scrub_vel_control_name(self):
self._vel_control_name = self.scrub_plan_name(self._vel_control_name)
@staticmethod
def scrub_date(date):
return date.strip()
@staticmethod
def scrub_plan_name(name):
return " ".join(name.split()).capitalize()
def is_disbursed_all(self):
x = self.has_fund_individuals and self.disbursement_type == self._form.DISB_ALL
if x is None:
return False
return x
def retrieve(self):
return self
@property
def plan_name(self):
return self._plan_name
@property
def funding_amount(self):
return self._funding_amount
@property
def plan_justification(self):
return self._plan_justification
@property
def memo(self):
return self._memo
@property
def start_date(self):
return self._start_date
@property
def source_fund(self):
return self._source_fund
@property
def dest_fund(self):
return self._dest_fund
@property
def has_fund_individuals(self):
return self._has_fund_individuals
@property
def disbursement_type(self):
return self._disbursement_type if self._disbursement_type else None
@property
def employees_list(self):
e_list = self._employees_list.data
if len(e_list) != 0 and e_list[0] != '':
for employeeField in e_list:
yield employeeField
else:
yield []
@employees_list.setter
def employees_list(self, e_list):
self._employees_list.pop_entry() # Remove the default entry
[self._employees_list.append_entry(e) for e in e_list]
@property
def has_end_date(self):
return self._has_end_date if self._has_end_date else None
@property
def end_date(self):
return self._end_date if self._end_date else None
@property
def has_velocity_controls(self):
return self._has_velocity_controls if self._has_velocity_controls else None
@property
def vel_control_name(self):
return self._vel_control_name if self._vel_control_name else None
@property
def vel_control_window(self):
return self._vel_control_window if self._vel_control_window else None
@property
def vel_amt_limit(self):
return self._vel_amt_limit if self._vel_amt_limit else None
@property
def vel_usage_limit(self):
return self._vel_usage_limit if self._vel_usage_limit else None
@property
def raw_form(self):
return self._form
@property
def time_zone(self):
return self._time_zone
@property
def priority(self):
return self._priority
| true | true |
f724cdce03e831ba60b697c99d9b9995f15edd45 | 18,132 | py | Python | cltk/corpus/utils/importer.py | Nada1996/cltk | 594f6aecff64c449a637ed05cd2c4655a606ba2d | [
"MIT"
] | null | null | null | cltk/corpus/utils/importer.py | Nada1996/cltk | 594f6aecff64c449a637ed05cd2c4655a606ba2d | [
"MIT"
] | null | null | null | cltk/corpus/utils/importer.py | Nada1996/cltk | 594f6aecff64c449a637ed05cd2c4655a606ba2d | [
"MIT"
] | null | null | null | """Import CLTK corpora.
TODO: Fix so ``import_corpora()`` can take relative path.
TODO: Add https://github.com/cltk/pos_latin
"""
from cltk.corpus.arabic.corpora import ARABIC_CORPORA
from cltk.corpus.chinese.corpora import CHINESE_CORPORA
from cltk.corpus.coptic.corpora import COPTIC_CORPORA
from cltk.corpus.greek.corpora import GREEK_CORPORA
from cltk.corpus.hebrew.corpora import HEBREW_CORPORA
from cltk.corpus.latin.corpora import LATIN_CORPORA
from cltk.corpus.sanskrit.corpora import SANSKRIT_CORPORA
from cltk.corpus.multilingual.corpora import MULTILINGUAL_CORPORA
from cltk.corpus.pali.corpora import PALI_CORPORA
from cltk.corpus.punjabi.corpora import PUNJABI_CORPORA
from cltk.corpus.tibetan.corpora import TIBETAN_CORPORA
from cltk.corpus.old_english.corpora import OLD_ENGLISH_CORPORA
from cltk.corpus.bengali.corpora import BENGALI_CORPORA
from cltk.corpus.old_church_slavonic.corpora import OCS_CORPORA
from cltk.corpus.prakrit.corpora import PRAKRIT_CORPORA
from cltk.corpus.hindi.corpora import HINDI_CORPORA
from cltk.corpus.javanese.corpora import JAVANESE_CORPORA
from cltk.corpus.malayalam.corpora import MALAYALAM_CORPORA
from cltk.corpus.old_norse.corpora import OLD_NORSE_CORPORA
from cltk.corpus.telugu.corpora import TELUGU_CORPORA
from cltk.corpus.classical_hindi.corpora import CLASSICAL_HINDI_CORPORA
from cltk.corpus.french.corpora import FRENCH_CORPORA
from cltk.corpus.marathi.corpora import MARATHI_CORPORA
from cltk.corpus.gujarati.corpora import GUJARATI_CORPORA
from cltk.corpus.medieval.corpora import MEDIEVAL_CORPORA
from cltk.utils.cltk_logger import logger
import errno
from git import RemoteProgress
from git import Repo
import os
import sys
import shutil
from urllib.parse import urljoin
import yaml
__author__ = ['Kyle P. Johnson <kyle@kyle-p-johnson.com>', 'Stephen Margheim <stephen.margheim@gmail.com>']
__license__ = 'MIT License. See LICENSE.'
# Languages for which corpora can be imported (core registries below plus
# any user-defined corpora declared in `~/cltk_data/distributed_corpora.yaml`).
# NOTE(review): 'middle_high_german' is listed here but has no entry in
# LANGUAGE_CORPORA below, so importing it without user-defined corpora would
# hit a KeyError in CorpusImporter.__init__ -- confirm whether a
# MIDDLE_HIGH_GERMAN registry import is missing.
AVAILABLE_LANGUAGES = ['arabic', 'chinese', 'coptic', 'greek', 'hebrew', 'latin', 'multilingual',
                       'pali', 'punjabi', 'tibetan', 'sanskrit', 'old_english',
                       'bengali', 'prakrit', 'hindi', 'old_church_slavonic',
                       'malayalam', 'marathi', 'javanese','old_norse','telugu','classical_hindi',
                       'french', 'gujarati', 'middle_high_german','medieval',]

# Root directory for all downloaded/copied corpora; expanded with
# os.path.expanduser at the point of use.
CLTK_DATA_DIR = '~/cltk_data'

# Maps a language name to its list of corpus-descriptor dicts (each descriptor
# appears to carry 'name', 'type', and either 'location' or 'origin' -- see
# CorpusImporter.import_corpus).
LANGUAGE_CORPORA = {'arabic': ARABIC_CORPORA,
                    'chinese': CHINESE_CORPORA,
                    'coptic': COPTIC_CORPORA,
                    'greek': GREEK_CORPORA,
                    'hebrew': HEBREW_CORPORA,
                    'latin': LATIN_CORPORA,
                    'multilingual': MULTILINGUAL_CORPORA,
                    'pali': PALI_CORPORA,
                    'punjabi': PUNJABI_CORPORA,
                    'tibetan': TIBETAN_CORPORA,
                    'sanskrit': SANSKRIT_CORPORA,
                    'old_english': OLD_ENGLISH_CORPORA,
                    'bengali': BENGALI_CORPORA,
                    'old_church_slavonic': OCS_CORPORA,
                    'prakrit': PRAKRIT_CORPORA,
                    'hindi': HINDI_CORPORA,
                    'malayalam': MALAYALAM_CORPORA,
                    'marathi': MARATHI_CORPORA,
                    'javanese': JAVANESE_CORPORA,
                    'old_norse':OLD_NORSE_CORPORA,
                    'telugu':TELUGU_CORPORA,
                    'classical_hindi':CLASSICAL_HINDI_CORPORA,
                    'french':FRENCH_CORPORA,
                    'gujarati': GUJARATI_CORPORA,
                    'medieval':MEDIEVAL_CORPORA,
                    }
class CorpusImportError(Exception):
    """Raised when a CLTK corpus cannot be downloaded or loaded."""
class ProgressPrinter(RemoteProgress):
    """Class that implements progress reporting.

    GitPython calls :meth:`update` repeatedly during clone/fetch; this
    subclass renders the progress to stdout as an in-place percentage line.
    """
    def update(self, op_code, cur_count, max_count=None, message=''):
        # Only render ticks that carry a message; silent ticks are skipped.
        if message:
            # `max_count or 100.0` guards against a missing/zero total
            # (avoids ZeroDivisionError).
            percentage = '%.0f' % (100 * cur_count / (max_count or 100.0))
            # Trailing '\r' rewrites the same terminal line each tick.
            sys.stdout.write('Downloaded %s%% %s \r' % (percentage, message))
class CorpusImporter:
    """Import CLTK corpora.

    Resolves the corpora available for a language -- the core registries in
    ``LANGUAGE_CORPORA`` plus any user-defined corpora declared in
    ``~/cltk_data/distributed_corpora.yaml`` -- and downloads (git
    clone/pull) or copies them into ``~/cltk_data``.
    """

    def __init__(self, language, testing=False):
        """Setup corpus importing.

        `testing` is a hack to check a tmp .yaml file to look at or local corpus. This keeps from overwriting
        local. A better idea is probably to refuse to overwrite the .yaml.

        :param language: language name; must be in ``AVAILABLE_LANGUAGES``
            or have user-defined corpora in the distributed-corpora yaml.
        :param testing: when True, read ``test_distributed_corpora.yaml``
            instead of the real user file.
        """
        self.language = language.lower()
        assert isinstance(testing, bool), '`testing` parameter must be boolean type'
        self.testing = testing
        self.user_defined_corpora = self._setup_language_variables()
        # if user_defined_corpora, then we need to add these to the corpus.py objects
        if self.user_defined_corpora:
            logger.info('User-defined corpus found for "{}" language'.format(self.language))
            try:
                logger.debug('Core corpora also found for "{}" language'.format(self.language))
                logger.debug('Combining the user-defined and the core corpora')
                self.official_corpora = LANGUAGE_CORPORA[self.language]
                self.all_corpora = self.official_corpora
                for corpus in self.user_defined_corpora:
                    self.all_corpora.append(corpus)
            except KeyError:
                # No core registry for this language: build the list solely
                # from the user-defined yaml entries.
                logger.debug('Nothing in the official repos '
                             'for "{}" language. Make the all_corpora solely '
                             'from the .yaml'.format(self.language))
                self.all_corpora = []
                for corpus in self.user_defined_corpora:
                    self.all_corpora.append(corpus)
        else:
            logger.info('No user-defined corpora found for "{}" language'.format(self.language))
            self.all_corpora = LANGUAGE_CORPORA[self.language]

    def __repr__(self):
        """Representation string for ipython
        :rtype : str
        """
        return 'CorpusImporter for: {}'.format(self.language)

    def _check_distributed_corpora_file(self):
        """Check '~/cltk_data/distributed_corpora.yaml' for any custom,
        distributed corpora that the user wants to load locally.

        :return: list of corpus dicts (keys: 'name', 'origin', 'type') for
            this importer's language; empty list when the yaml is missing
            or unparseable.

        TODO: write check or try if `cltk_data` dir is not present
        """
        if self.testing:
            distributed_corpora_fp = os.path.expanduser('~/cltk_data/test_distributed_corpora.yaml')
        else:
            distributed_corpora_fp = os.path.expanduser('~/cltk_data/distributed_corpora.yaml')
        try:
            with open(distributed_corpora_fp) as file_open:
                corpora_dict = yaml.safe_load(file_open)
        except FileNotFoundError:
            logger.info('`~/cltk_data/distributed_corpora.yaml` file not found.')
            return []
        except yaml.parser.ParserError as parse_err:
            logger.debug('Yaml parsing error: %s' % parse_err)
            return []
        user_defined_corpora = []
        for corpus_name in corpora_dict:
            about = corpora_dict[corpus_name]
            if about['language'].lower() == self.language:
                user_defined_corpus = dict()
                user_defined_corpus['origin'] = about['origin']
                user_defined_corpus['type'] = about['type']
                user_defined_corpus['name'] = corpus_name
                user_defined_corpora.append(user_defined_corpus)
        return user_defined_corpora

    def _setup_language_variables(self):
        """Check for availability of corpora for a language.

        :return: user-defined corpora for this language (may be empty when
            the language has core corpora).
        :raises CorpusImportError: when the language has neither core nor
            user-defined corpora.

        TODO: Make the selection of available languages dynamic from dirs
        within ``corpora`` which contain a ``corpora.py`` file.
        """
        if self.language not in AVAILABLE_LANGUAGES:
            # If no official repos, check if user has custom
            user_defined_corpora = self._check_distributed_corpora_file()
            if user_defined_corpora:
                return user_defined_corpora
            else:
                msg = 'Corpora not available (either core or user-defined) for the "{}" language.'.format(self.language)
                logger.info(msg)
                raise CorpusImportError(msg)
        else:
            user_defined_corpora = self._check_distributed_corpora_file()
            return user_defined_corpora

    @property
    def list_corpora(self):
        """Show corpora available for the CLTK to download.

        :return: list of corpus names.
        :raises CorpusImportError: when no corpora exist for the language.
        """
        try:
            corpora = self.all_corpora
            corpus_names = [corpus['name'] for corpus in corpora]
            return corpus_names
        except (NameError, KeyError) as error:
            msg = 'Corpus not available for language "{}": {}'.format(self.language, error)
            logger.error(msg)
            raise CorpusImportError(msg)

    @staticmethod
    def _copy_dir_recursive(src_rel, dst_rel):
        """Copy contents of one directory to another. `dst_rel` dir cannot
        exist. Source: http://stackoverflow.com/a/1994840
        TODO: Move this to file_operations.py module.
        :type src_rel: str
        :param src_rel: Directory to be copied.
        :type dst_rel: str
        :param dst_rel: Directory to be created with contents of ``src_rel``.
        """
        src = os.path.expanduser(src_rel)
        dst = os.path.expanduser(dst_rel)
        try:
            shutil.copytree(src, dst)
            logger.info('Files copied from %s to %s', src, dst)
        except OSError as exc:
            # ENOTDIR: `src` was a single file, not a directory; fall back
            # to a plain file copy.
            if exc.errno == errno.ENOTDIR:
                shutil.copy(src, dst)
                logger.info('Files copied from %s to %s', src, dst)
            else:
                raise

    def _get_corpus_properties(self, corpus_name):
        """Check whether a corpus is available for import.

        :type corpus_name: str
        :param corpus_name: Name of available corpus.
        :return: the corpus-descriptor dict for ``corpus_name``.
        :raises CorpusImportError: when the corpus is unknown.
        """
        try:
            corpora = self.all_corpora
        except NameError as name_error:
            msg = 'Corpus not available for language ' \
                  '"%s": %s' % (self.language, name_error)
            logger.error(msg)
            raise CorpusImportError(msg)
        for corpus_properties in corpora:
            if corpus_properties['name'] == corpus_name:
                return corpus_properties
        msg = 'Corpus "%s" not available for the ' \
              '"%s" language.' % (corpus_name, self.language)
        logger.error(msg)
        raise CorpusImportError(msg)

    def _git_user_defined_corpus(self, corpus_name, corpus_type, uri: str, branch='master'):
        """Clone or update a git repo defined by user.

        TODO: This code is very redundant with what's in import_corpus(),
        could be refactored.
        """
        type_dir_rel = os.path.join(CLTK_DATA_DIR, self.language, corpus_type)
        type_dir = os.path.expanduser(type_dir_rel)
        repo_name = uri.split('/')[-1]  # eg, 'latin_corpus_newton_example.git'
        # BUG FIX: str.rstrip('.git') strips any trailing '.', 'g', 'i', 't'
        # *characters*, mangling names such as 'my_digit.git' -> 'my_di';
        # remove the literal '.git' suffix instead.
        if repo_name.endswith('.git'):
            repo_name = repo_name[:-len('.git')]
        target_dir = os.path.join(type_dir, repo_name)
        target_file = os.path.join(type_dir, repo_name, 'README.md')
        # check if corpus already present
        # if not, clone
        if not os.path.isfile(target_file):
            if not os.path.isdir(type_dir):
                os.makedirs(type_dir)
            try:
                msg = "Cloning '{}' from '{}'".format(corpus_name, uri)
                logger.info(msg)
                Repo.clone_from(uri, target_dir, branch=branch, depth=1,
                                progress=ProgressPrinter())
            except CorpusImportError as corpus_imp_err:
                msg = "Git clone of '{}' failed: '{}'".format(uri, corpus_imp_err)
                logger.error(msg)
        # if corpus is present, pull latest
        else:
            try:
                repo = Repo(target_dir)
                assert not repo.bare  # or: assert repo.exists()
                git_origin = repo.remotes.origin
                msg = "Pulling latest '{}' from '{}'.".format(corpus_name, uri)
                logger.info(msg)
                git_origin.pull()
            except CorpusImportError as corpus_imp_err:
                msg = "Git pull of '{}' failed: '{}'".format(uri, corpus_imp_err)
                logger.error(msg)

    def import_corpus(self, corpus_name, local_path=None, branch='master'):  # pylint: disable=R0912
        """Download a remote or load local corpus into dir ``~/cltk_data``.

        TODO: maybe add ``from git import RemoteProgress``
        TODO: refactor this, it's getting kinda long

        :type corpus_name: str
        :param corpus_name: The name of an available corpus.
        :param local_path: str
        :param local_path: A filepath, required when importing local corpora.
        :param branch: What Git branch to clone.
        """
        corpus_properties = self._get_corpus_properties(corpus_name)
        try:
            location = corpus_properties['location']
        except KeyError:
            # User-defined corpora (from distributed_corpora.yaml) carry no
            # 'location' key; they are cloned from their declared origin.
            # BUG FIX: was ``corpus_properties['']``, which always raised
            # KeyError here -- the corpus name is stored under 'name'.
            git_name = corpus_properties['name']
            git_uri = corpus_properties['origin']
            git_type = corpus_properties['type']
            # pass this off to a special downloader just for custom urls
            self._git_user_defined_corpus(git_name, git_type, git_uri)
            return
        corpus_type = corpus_properties['type']
        if location == 'remote':
            git_uri = corpus_properties['origin']
            type_dir_rel = os.path.join(CLTK_DATA_DIR, self.language, corpus_type)
            type_dir = os.path.expanduser(type_dir_rel)
            target_dir = os.path.join(type_dir, corpus_name)
            target_file = os.path.join(type_dir, corpus_name, 'README.md')
            # check if corpus already present
            # if not, clone
            if not os.path.isfile(target_file):
                if not os.path.isdir(type_dir):
                    os.makedirs(type_dir)
                try:
                    msg = "Cloning '{}' from '{}'".format(corpus_name, git_uri)
                    logger.info(msg)
                    Repo.clone_from(git_uri, target_dir, branch=branch, depth=1,
                                    progress=ProgressPrinter())
                except CorpusImportError as corpus_imp_err:
                    msg = "Git clone of '{}' failed: '{}'".format(git_uri, corpus_imp_err)
                    logger.error(msg)
            # if corpus is present, pull latest
            else:
                try:
                    repo = Repo(target_dir)
                    assert not repo.bare  # or: assert repo.exists()
                    git_origin = repo.remotes.origin
                    msg = "Pulling latest '{}' from '{}'.".format(corpus_name, git_uri)
                    logger.info(msg)
                    git_origin.pull()
                except CorpusImportError as corpus_imp_err:
                    msg = "Git pull of '{}' failed: '{}'".format(git_uri, corpus_imp_err)
                    logger.error(msg)
        elif location == 'local':
            msg = "Importing from local path: '{}'".format(local_path)
            logger.info(msg)
            # PHI/TLG corpora must live in specifically-named directories.
            if corpus_name in ('phi5', 'phi7', 'tlg'):
                if corpus_name == 'phi5':
                    # normalize path for checking dir
                    if local_path.endswith('/'):
                        local_path = local_path[:-1]
                    # check for right corpus dir
                    if os.path.split(local_path)[1] != 'PHI5':
                        logger.info("Directory must be named 'PHI5'.")
                if corpus_name == 'phi7':
                    # normalize local_path for checking dir
                    if local_path.endswith('/'):
                        local_path = local_path[:-1]
                    # check for right corpus dir
                    if os.path.split(local_path)[1] != 'PHI7':
                        logger.info("Directory must be named 'PHI7'.")
                if corpus_name == 'tlg':
                    # normalize path for checking dir
                    if local_path.endswith('/'):
                        local_path = local_path[:-1]
                    # check for right corpus dir
                    if os.path.split(local_path)[1] != 'TLG_E':
                        logger.info("Directory must be named 'TLG_E'.")
                # move the dir-checking commands into a function
                data_dir = os.path.expanduser(CLTK_DATA_DIR)
                originals_dir = os.path.join(data_dir, 'originals')
                # check for `originals` dir; if not present mkdir
                if not os.path.isdir(originals_dir):
                    os.makedirs(originals_dir)
                    msg = "Wrote directory at '{}'.".format(originals_dir)
                    logger.info(msg)
                tlg_originals_dir = os.path.join(data_dir,
                                                 'originals',
                                                 corpus_name)
                # check for `originals/<corpus_name>`; if pres, delete
                if os.path.isdir(tlg_originals_dir):
                    shutil.rmtree(tlg_originals_dir)
                    msg = "Removed directory at '{}'.".format(tlg_originals_dir)
                    logger.info(msg)
                # copy_dir requires that target does not yet exist
                if not os.path.isdir(tlg_originals_dir):
                    self._copy_dir_recursive(local_path, tlg_originals_dir)
if __name__ == '__main__':
    # Demo/manual-run entry point: download a small Latin training corpus
    # into ~/cltk_data (requires network access).
    importer = CorpusImporter('latin')
    importer.import_corpus('latin_training_set_sentence_cltk')
| 46.020305 | 120 | 0.596073 | from cltk.corpus.arabic.corpora import ARABIC_CORPORA
from cltk.corpus.chinese.corpora import CHINESE_CORPORA
from cltk.corpus.coptic.corpora import COPTIC_CORPORA
from cltk.corpus.greek.corpora import GREEK_CORPORA
from cltk.corpus.hebrew.corpora import HEBREW_CORPORA
from cltk.corpus.latin.corpora import LATIN_CORPORA
from cltk.corpus.sanskrit.corpora import SANSKRIT_CORPORA
from cltk.corpus.multilingual.corpora import MULTILINGUAL_CORPORA
from cltk.corpus.pali.corpora import PALI_CORPORA
from cltk.corpus.punjabi.corpora import PUNJABI_CORPORA
from cltk.corpus.tibetan.corpora import TIBETAN_CORPORA
from cltk.corpus.old_english.corpora import OLD_ENGLISH_CORPORA
from cltk.corpus.bengali.corpora import BENGALI_CORPORA
from cltk.corpus.old_church_slavonic.corpora import OCS_CORPORA
from cltk.corpus.prakrit.corpora import PRAKRIT_CORPORA
from cltk.corpus.hindi.corpora import HINDI_CORPORA
from cltk.corpus.javanese.corpora import JAVANESE_CORPORA
from cltk.corpus.malayalam.corpora import MALAYALAM_CORPORA
from cltk.corpus.old_norse.corpora import OLD_NORSE_CORPORA
from cltk.corpus.telugu.corpora import TELUGU_CORPORA
from cltk.corpus.classical_hindi.corpora import CLASSICAL_HINDI_CORPORA
from cltk.corpus.french.corpora import FRENCH_CORPORA
from cltk.corpus.marathi.corpora import MARATHI_CORPORA
from cltk.corpus.gujarati.corpora import GUJARATI_CORPORA
from cltk.corpus.medieval.corpora import MEDIEVAL_CORPORA
from cltk.utils.cltk_logger import logger
import errno
from git import RemoteProgress
from git import Repo
import os
import sys
import shutil
from urllib.parse import urljoin
import yaml
__author__ = ['Kyle P. Johnson <kyle@kyle-p-johnson.com>', 'Stephen Margheim <stephen.margheim@gmail.com>']
__license__ = 'MIT License. See LICENSE.'
AVAILABLE_LANGUAGES = ['arabic', 'chinese', 'coptic', 'greek', 'hebrew', 'latin', 'multilingual',
'pali', 'punjabi', 'tibetan', 'sanskrit', 'old_english',
'bengali', 'prakrit', 'hindi', 'old_church_slavonic',
'malayalam', 'marathi', 'javanese','old_norse','telugu','classical_hindi',
'french', 'gujarati', 'middle_high_german','medieval',]
CLTK_DATA_DIR = '~/cltk_data'
LANGUAGE_CORPORA = {'arabic': ARABIC_CORPORA,
'chinese': CHINESE_CORPORA,
'coptic': COPTIC_CORPORA,
'greek': GREEK_CORPORA,
'hebrew': HEBREW_CORPORA,
'latin': LATIN_CORPORA,
'multilingual': MULTILINGUAL_CORPORA,
'pali': PALI_CORPORA,
'punjabi': PUNJABI_CORPORA,
'tibetan': TIBETAN_CORPORA,
'sanskrit': SANSKRIT_CORPORA,
'old_english': OLD_ENGLISH_CORPORA,
'bengali': BENGALI_CORPORA,
'old_church_slavonic': OCS_CORPORA,
'prakrit': PRAKRIT_CORPORA,
'hindi': HINDI_CORPORA,
'malayalam': MALAYALAM_CORPORA,
'marathi': MARATHI_CORPORA,
'javanese': JAVANESE_CORPORA,
'old_norse':OLD_NORSE_CORPORA,
'telugu':TELUGU_CORPORA,
'classical_hindi':CLASSICAL_HINDI_CORPORA,
'french':FRENCH_CORPORA,
'gujarati': GUJARATI_CORPORA,
'medieval':MEDIEVAL_CORPORA,
}
class CorpusImportError(Exception):
pass
class ProgressPrinter(RemoteProgress):
def update(self, op_code, cur_count, max_count=None, message=''):
if message:
percentage = '%.0f' % (100 * cur_count / (max_count or 100.0))
sys.stdout.write('Downloaded %s%% %s \r' % (percentage, message))
class CorpusImporter:
    """Download and install CLTK corpora for one language.

    Corpora come from two sources: the core lists in ``LANGUAGE_CORPORA``
    and user-defined entries read from
    ``~/cltk_data/distributed_corpora.yaml``. Remote corpora are cloned or
    pulled with git into ``~/cltk_data/<language>/<corpus type>/``; local
    corpora (phi5/phi7/tlg) are copied into ``~/cltk_data/originals/``.
    """

    def __init__(self, language, testing=False):
        """Collect core and user-defined corpora for ``language``.

        :param language: language name (case-insensitive); must be in
            AVAILABLE_LANGUAGES or have user-defined corpora configured.
        :param testing: read ``test_distributed_corpora.yaml`` instead of
            ``distributed_corpora.yaml`` (used by the test suite).
        :raises CorpusImportError: if no corpora exist for the language.
        """
        self.language = language.lower()
        assert isinstance(testing, bool), '`testing` parameter must be boolean type'
        self.testing = testing
        self.user_defined_corpora = self._setup_language_variables()
        if self.user_defined_corpora:
            logger.info('User-defined corpus found for "{}" language'.format(self.language))
            try:
                logger.debug('Core corpora also found for "{}" language'.format(self.language))
                logger.debug('Combining the user-defined and the core corpora')
                self.official_corpora = LANGUAGE_CORPORA[self.language]
                # Copy the core list before extending it: appending directly
                # mutated the shared LANGUAGE_CORPORA entry, so user-defined
                # corpora leaked into every later CorpusImporter instance.
                self.all_corpora = list(self.official_corpora)
                self.all_corpora.extend(self.user_defined_corpora)
            except KeyError:
                logger.debug('Nothing in the official repos '
                             'for "{}" language. Make the all_corpora solely '
                             'from the .yaml'.format(self.language))
                self.all_corpora = list(self.user_defined_corpora)
        else:
            logger.info('No user-defined corpora found for "{}" language'.format(self.language))
            self.all_corpora = LANGUAGE_CORPORA[self.language]

    def __repr__(self):
        return 'CorpusImporter for: {}'.format(self.language)

    def _check_distributed_corpora_file(self):
        """Read the user's distributed-corpora YAML file.

        :return: list of dicts (keys ``origin``, ``type``, ``name``) for
            entries whose ``language`` matches ``self.language``; an empty
            list when the file is missing or cannot be parsed.
        """
        if self.testing:
            distributed_corpora_fp = os.path.expanduser('~/cltk_data/test_distributed_corpora.yaml')
        else:
            distributed_corpora_fp = os.path.expanduser('~/cltk_data/distributed_corpora.yaml')
        try:
            with open(distributed_corpora_fp) as file_open:
                corpora_dict = yaml.safe_load(file_open)
        except FileNotFoundError:
            logger.info('`~/cltk_data/distributed_corpora.yaml` file not found.')
            return []
        except yaml.parser.ParserError as parse_err:
            logger.debug('Yaml parsing error: %s' % parse_err)
            return []
        user_defined_corpora = []
        for corpus_name in corpora_dict:
            about = corpora_dict[corpus_name]
            if about['language'].lower() == self.language:
                user_defined_corpus = dict()
                user_defined_corpus['origin'] = about['origin']
                user_defined_corpus['type'] = about['type']
                user_defined_corpus['name'] = corpus_name
                user_defined_corpora.append(user_defined_corpus)
        return user_defined_corpora

    def _setup_language_variables(self):
        """Return the user-defined corpora for this language.

        :raises CorpusImportError: when the language is neither in
            AVAILABLE_LANGUAGES nor covered by user-defined corpora.
        """
        if self.language not in AVAILABLE_LANGUAGES:
            user_defined_corpora = self._check_distributed_corpora_file()
            if user_defined_corpora:
                return user_defined_corpora
            msg = 'Corpora not available (either core or user-defined) for the "{}" language.'.format(self.language)
            logger.info(msg)
            raise CorpusImportError(msg)
        return self._check_distributed_corpora_file()

    @property
    def list_corpora(self):
        """Names of every corpus (core + user-defined) for this language."""
        try:
            corpora = self.all_corpora
            corpus_names = [corpus['name'] for corpus in corpora]
            return corpus_names
        except (NameError, KeyError) as error:
            msg = 'Corpus not available for language "{}": {}'.format(self.language, error)
            logger.error(msg)
            raise CorpusImportError(msg)

    @staticmethod
    def _copy_dir_recursive(src_rel, dst_rel):
        """Copy a directory tree (or a single file) from src_rel to dst_rel."""
        src = os.path.expanduser(src_rel)
        dst = os.path.expanduser(dst_rel)
        try:
            shutil.copytree(src, dst)
            logger.info('Files copied from %s to %s', src, dst)
        except OSError as exc:
            if exc.errno == errno.ENOTDIR:
                # `src` is a regular file rather than a directory.
                shutil.copy(src, dst)
                logger.info('Files copied from %s to %s', src, dst)
            else:
                raise

    def _get_corpus_properties(self, corpus_name):
        """Return the description dict of ``corpus_name``.

        :raises CorpusImportError: if the corpus is unknown for this language.
        """
        try:
            corpora = self.all_corpora
        except NameError as name_error:
            msg = 'Corpus not available for language ' \
                  '"%s": %s' % (self.language, name_error)
            logger.error(msg)
            raise CorpusImportError(msg)
        for corpus_properties in corpora:
            if corpus_properties['name'] == corpus_name:
                return corpus_properties
        msg = 'Corpus "%s" not available for the ' \
              '"%s" language.' % (corpus_name, self.language)
        logger.error(msg)
        raise CorpusImportError(msg)

    def _git_user_defined_corpus(self, corpus_name, corpus_type, uri: str, branch='master'):
        """Clone (first run) or pull (later runs) a user-defined git corpus."""
        type_dir_rel = os.path.join(CLTK_DATA_DIR, self.language, corpus_type)
        type_dir = os.path.expanduser(type_dir_rel)
        repo_name = uri.split('/')[-1]
        # Bug fix: str.rstrip('.git') strips *characters* ('.', 'g', 'i',
        # 't') from the right, mangling names such as 'digit.git' -> 'd'.
        # Remove only a literal '.git' suffix.
        if repo_name.endswith('.git'):
            repo_name = repo_name[:-len('.git')]
        target_dir = os.path.join(type_dir, repo_name)
        # An existing README.md marks an already-cloned corpus.
        target_file = os.path.join(type_dir, repo_name, 'README.md')
        if not os.path.isfile(target_file):
            if not os.path.isdir(type_dir):
                os.makedirs(type_dir)
            try:
                msg = "Cloning '{}' from '{}'".format(corpus_name, uri)
                logger.info(msg)
                Repo.clone_from(uri, target_dir, branch=branch, depth=1,
                                progress=ProgressPrinter())
            # NOTE(review): git failures raise GitCommandError, not
            # CorpusImportError, so this handler likely never fires; kept
            # as-is to preserve existing behavior -- confirm intent.
            except CorpusImportError as corpus_imp_err:
                msg = "Git clone of '{}' failed: '{}'".format(uri, corpus_imp_err)
                logger.error(msg)
        else:
            try:
                repo = Repo(target_dir)
                assert not repo.bare
                git_origin = repo.remotes.origin
                msg = "Pulling latest '{}' from '{}'.".format(corpus_name, uri)
                logger.info(msg)
                git_origin.pull()
            except CorpusImportError as corpus_imp_err:
                msg = "Git pull of '{}' failed: '{}'".format(uri, corpus_imp_err)
                logger.error(msg)

    def import_corpus(self, corpus_name, local_path=None, branch='master'):
        """Download or copy ``corpus_name`` into the CLTK data directory.

        :param corpus_name: one of the names returned by ``list_corpora``.
        :param local_path: filesystem path of the corpus for 'local'
            corpora (phi5/phi7/tlg); ignored for remote corpora.
        :param branch: git branch to clone for remote corpora.
        """
        corpus_properties = self._get_corpus_properties(corpus_name)
        try:
            location = corpus_properties['location']
        except KeyError:
            # User-defined corpora (from the YAML file) carry no 'location'
            # key; they are always fetched with git from their origin URI.
            # Bug fix: was corpus_properties[''], which always raised KeyError.
            git_name = corpus_properties['name']
            git_uri = corpus_properties['origin']
            git_type = corpus_properties['type']
            self._git_user_defined_corpus(git_name, git_type, git_uri)
            return
        corpus_type = corpus_properties['type']
        if location == 'remote':
            git_uri = corpus_properties['origin']
            type_dir_rel = os.path.join(CLTK_DATA_DIR, self.language, corpus_type)
            type_dir = os.path.expanduser(type_dir_rel)
            target_dir = os.path.join(type_dir, corpus_name)
            # An existing README.md marks an already-cloned corpus.
            target_file = os.path.join(type_dir, corpus_name, 'README.md')
            if not os.path.isfile(target_file):
                if not os.path.isdir(type_dir):
                    os.makedirs(type_dir)
                try:
                    msg = "Cloning '{}' from '{}'".format(corpus_name, git_uri)
                    logger.info(msg)
                    Repo.clone_from(git_uri, target_dir, branch=branch, depth=1,
                                    progress=ProgressPrinter())
                except CorpusImportError as corpus_imp_err:
                    msg = "Git clone of '{}' failed: '{}'".format(git_uri, corpus_imp_err)
                    logger.error(msg)
            else:
                try:
                    repo = Repo(target_dir)
                    assert not repo.bare
                    git_origin = repo.remotes.origin
                    msg = "Pulling latest '{}' from '{}'.".format(corpus_name, git_uri)
                    logger.info(msg)
                    git_origin.pull()
                except CorpusImportError as corpus_imp_err:
                    msg = "Git pull of '{}' failed: '{}'".format(git_uri, corpus_imp_err)
                    logger.error(msg)
        elif location == 'local':
            msg = "Importing from local path: '{}'".format(local_path)
            logger.info(msg)
            # phi5/phi7/tlg are restricted corpora that the user supplies on
            # disk; they are copied under ~/cltk_data/originals/.
            if corpus_name in ('phi5', 'phi7', 'tlg'):
                # Each corpus must live in a conventionally named directory.
                expected_dir = {'phi5': 'PHI5', 'phi7': 'PHI7', 'tlg': 'TLG_E'}[corpus_name]
                if local_path.endswith('/'):
                    local_path = local_path[:-1]
                if os.path.split(local_path)[1] != expected_dir:
                    logger.info("Directory must be named '{}'.".format(expected_dir))
                data_dir = os.path.expanduser(CLTK_DATA_DIR)
                originals_dir = os.path.join(data_dir, 'originals')
                if not os.path.isdir(originals_dir):
                    os.makedirs(originals_dir)
                    msg = "Wrote directory at '{}'.".format(originals_dir)
                    logger.info(msg)
                tlg_originals_dir = os.path.join(data_dir,
                                                 'originals',
                                                 corpus_name)
                # Replace any previous copy of the corpus.
                if os.path.isdir(tlg_originals_dir):
                    shutil.rmtree(tlg_originals_dir)
                    msg = "Removed directory at '{}'.".format(tlg_originals_dir)
                    logger.info(msg)
                if not os.path.isdir(tlg_originals_dir):
                    self._copy_dir_recursive(local_path, tlg_originals_dir)
if __name__ == '__main__':
    # Manual smoke test: fetch the Latin sentence-tokenizer training corpus.
    importer = CorpusImporter('latin')
    importer.import_corpus('latin_training_set_sentence_cltk')
| true | true |
f724cdeca3f91643abf9127ba1abde54edc87cec | 16,147 | py | Python | hikyuu/admin/HikyuuAdmin.py | dasuren/hikyuu | d1a1a43c10653d17ac91446e4499e6cfbfdbce12 | [
"MIT"
] | 1 | 2021-05-20T14:47:16.000Z | 2021-05-20T14:47:16.000Z | hikyuu/admin/HikyuuAdmin.py | dasuren/hikyuu | d1a1a43c10653d17ac91446e4499e6cfbfdbce12 | [
"MIT"
] | null | null | null | hikyuu/admin/HikyuuAdmin.py | dasuren/hikyuu | d1a1a43c10653d17ac91446e4499e6cfbfdbce12 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2010-2019 fasiondog/hikyuu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sys
import os
from PyQt5.QtWidgets import QVBoxLayout
cur_dir = os.path.dirname(__file__)
# Add the current directory to sys.path so its submodules can import one another
sys.path.append(cur_dir)
# Add the hikyuu directory to sys.path so the utils package can be imported directly
sys.path.append(os.path.split(cur_dir)[0])
from PyQt5 import QtCore, QtGui, QtWidgets
import qdarkstyle
from UiConfig import UiConfig
from translate import _translate
from widget.HkuSessionViewWidget import HkuSessionViewWidget
from dialog import *
from widget import *
from data import (get_local_db, SessionModel)
from service import AssisService
class MyMainWindow(QtWidgets.QMainWindow):
    """Main window of the Hikyuu strategy-server manager GUI.

    Owns the session tree dock, the central tab widget, and all menu /
    toolbar actions; persists window style and geometry via UiConfig.
    """
    def __init__(self):
        super().__init__()
        appid = 'HikyuuAdmin'
        QtWidgets.QApplication.setApplicationName(appid)
        QtWidgets.QApplication.setOrganizationName("org.hikyuu")
        # Internationalization: install the Chinese translation when the
        # system locale is Chinese.
        loc = QtCore.QLocale()
        if loc.language() == QtCore.QLocale.Chinese:
            self.trans = QtCore.QTranslator()
            self.trans.load("{}/language/zh_CN.qm".format(os.path.dirname(__file__)))  # load the compiled .qm language pack
            _app = QtWidgets.QApplication.instance()  # the running application instance
            _app.installTranslator(self.trans)  # install the translator on the application
        # Set the application icon resources.
        # If the icons do not display, check that the resource module was imported.
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/logo/logo_16.png"))
        icon.addPixmap(QtGui.QPixmap(":/logo/logo_32.png"))
        icon.addPixmap(QtGui.QPixmap(":/logo/logo_48.png"))
        icon.addPixmap(QtGui.QPixmap(":/logo/logo_64.png"))
        icon.addPixmap(QtGui.QPixmap(":/logo/logo_128.png"))
        icon.addPixmap(QtGui.QPixmap(":/logo/logo_256.png"))
        self.setWindowIcon(icon)
        if sys.platform == 'win32':
            # On Windows, set the taskbar icon via an explicit AppUserModelID.
            import ctypes
            ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appid)
        self.ui_config = UiConfig()
        self.setObjectName("HikyuuAdminMainWindow")
        self.setWindowTitle(_translate("MainWindow", "Hikyuu Strategy Server Manager"))
        # Bind the local database for convenience; prefer calling methods on
        # the Model classes directly where possible.
        self.db = get_local_db()
        self.initAction()
        self.initMenuBar()
        self.initMenu()
        self.initToolBar()
        self.initActionConnect()
        self.initMainTabWidget()
        self.initDockWidgets()
        self.statusBar().showMessage(_translate('MainWindow', 'Running'))
        # After initialization, restore window style and size from the saved
        # UI configuration.
        style = self.ui_config.get('main_window', 'style', fallback='normal_style')
        if style == 'dark_style':
            QtWidgets.qApp.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyqt5'))
        if self.ui_config.getboolean('main_window', 'maximized', fallback=False):
            self.showMaximized()
        else:
            self.resize(
                self.ui_config.getint('main_window', 'width', fallback=800),
                self.ui_config.getint('main_window', 'height', fallback=500)
            )
        QtCore.QMetaObject.connectSlotsByName(self)
    @property
    def session(self):
        """Shortcut to the local database's SQLAlchemy session."""
        return self.db.session
    def closeEvent(self, event):
        # Persist window style/geometry before closing.
        self.ui_config.save(self)
        event.accept()
    def initAction(self):
        """Create all QAction objects and their status tips."""
        self.action_dict = dict(
            action_new_file_session=QtWidgets.QAction(
                QtGui.QIcon(":/icon/new_32.png"), _translate("MainWindow", "&New Session"), self
            ),
            action_edit_file_session=QtWidgets.QAction(
                QtGui.QIcon(":/icon/edit_32.png"), _translate("MainWindow", "&Edit Session"), self
            ),
            action_del_file_session=QtWidgets.QAction(
                QtGui.QIcon(":/icon/cancel_32.png"), _translate("MainWindow", "&Remove Session"), self
            ),
            action_file_connect=QtWidgets.QAction(
                QtGui.QIcon(":/icon/connect_32.png"), _translate('MainWindow', '&Connect Now')
            ),
            action_file_quit=QtWidgets.QAction(
                QtGui.QIcon(":/icon/quit_32.png"), _translate('MainWindow', '&Quit'), self
            ),
            action_view_normal_style=QtWidgets.QAction(_translate('MainWindow', 'Normal style'), self),
            action_view_dark_style=QtWidgets.QAction(_translate('MainWindow', 'Dark style'), self),
            action_about=QtWidgets.QAction(_translate('MainWindow', 'About'), self),
            action_about_qt=QtWidgets.QAction(_translate('MainWindow', 'About Qt'), self),
        )
        self.action_dict['action_new_file_session'].setStatusTip(_translate('MainWindow', 'New Session'))
        self.action_dict['action_file_connect'].setStatusTip(_translate('MainWindow', 'Connect Now'))
        self.action_dict['action_file_quit'].setStatusTip(_translate('MainWindow', 'Quit Application'))
        self.action_dict['action_about_qt'].setStatusTip(_translate('MainWindow', "Show the Qt library's About box"))
        self.action_dict['action_view_normal_style'].setObjectName('normal_style')
        self.action_dict['action_view_normal_style'].setStatusTip(_translate('MainWindow', 'Switch to normal style'))
        self.action_dict['action_view_dark_style'].setObjectName('dark_style')
        self.action_dict['action_view_dark_style'].setStatusTip(_translate('MainWindow', 'Switch to dark style'))
        # Edit/remove stay disabled until a session is selected.
        self.action_dict['action_edit_file_session'].setEnabled(False)
        self.action_dict['action_del_file_session'].setEnabled(False)
    def initMenuBar(self):
        """Create the top-level File / View / Help menus."""
        self.menubar_dict = dict(
            menu_file=self.menuBar().addMenu(_translate('MainWindow', "&File(F)")),
            menu_view=self.menuBar().addMenu(_translate('MainWindow', "&View(V)")),
            menu_help=self.menuBar().addMenu(_translate('MainWindow', "&Help(H)"))
        )
    def initMenu(self):
        """Populate the menus with the actions created in initAction()."""
        file_session_menu = self.menubar_dict['menu_file'].addMenu(
            QtGui.QIcon(":/icon/server_16.png"), _translate('MainWindow', '&Session Manager')
        )
        style_menu = self.menubar_dict['menu_view'].addMenu(_translate('MainWindow', 'Skin style'))
        self.menu_dict = dict(
            menu_file_new_session=file_session_menu.addAction(self.action_dict['action_new_file_session']),
            menu_file_edit_session=file_session_menu.addAction(self.action_dict['action_edit_file_session']),
            menu_file_del_session=file_session_menu.addAction(self.action_dict['action_del_file_session']),
            menu_file_connect=self.menubar_dict['menu_file'].addAction(self.action_dict['action_file_connect']),
            menu_file_quit=self.menubar_dict['menu_file'].addAction(self.action_dict['action_file_quit']),
            menu_view_normal_style=style_menu.addAction(self.action_dict['action_view_normal_style']),
            menu_view_dark_style=style_menu.addAction(self.action_dict['action_view_dark_style']),
            menu_about=self.menubar_dict['menu_help'].addAction(self.action_dict['action_about']),
            menu_about_qt=self.menubar_dict['menu_help'].addAction(self.action_dict['action_about_qt']),
        )
    def initToolBar(self):
        """Create the File toolbar mirroring the session actions."""
        self.setUnifiedTitleAndToolBarOnMac(True)
        file_toolbar = self.addToolBar('File')
        file_toolbar.addAction(self.action_dict['action_new_file_session'])
        file_toolbar.addAction(self.action_dict['action_edit_file_session'])
        file_toolbar.addAction(self.action_dict['action_del_file_session'])
        file_toolbar.addAction(self.action_dict['action_file_connect'])
        file_toolbar.addAction(self.action_dict['action_file_quit'])
    def initActionConnect(self):
        """Wire each action's triggered signal to its handler slot."""
        self.action_dict['action_new_file_session'].triggered.connect(self.actionNewSession)
        self.action_dict['action_edit_file_session'].triggered.connect(self.actionEditSession)
        self.action_dict['action_del_file_session'].triggered.connect(self.actionDeleteSession)
        self.action_dict['action_file_connect'].triggered.connect(self.actionConnect)
        self.action_dict['action_file_quit'].triggered.connect(self.close)
        self.action_dict['action_about'].triggered.connect(self.actionAbout)
        self.action_dict['action_about_qt'].triggered.connect(QtWidgets.QApplication.aboutQt)
        self.action_dict['action_view_normal_style'].triggered.connect(self.actionChangStyle)
        self.action_dict['action_view_dark_style'].triggered.connect(self.actionChangStyle)
    def initMainTabWidget(self):
        """Create the central tab widget that hosts management pages."""
        self.main_tab = QtWidgets.QTabWidget()
        self.setCentralWidget(self.main_tab)
        # Make tabs closable and connect the close signal.
        self.main_tab.setTabsClosable(True)
        self.main_tab.tabCloseRequested.connect(self.closeTab)
        self.tab_title_user_manage = _translate("MainWindow", "User Manage")
        # Maps tab title -> widget (None when the tab has been closed).
        self.tabs = {}
    def initDockWidgets(self):
        """Create the session-tree dock and load saved sessions from the DB."""
        self.server_view_dock = HkuSessionViewWidget(self)
        self.server_view_dock.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable)  # disallow closing
        self.server_view_dock.setMinimumWidth(200)
        # Remove the dock widget's title-bar buttons.
        title_bar = self.server_view_dock.titleBarWidget()
        self.server_view_dock.setTitleBarWidget(QtWidgets.QWidget())
        del title_bar
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.server_view_dock)
        servers = self.db.session.query(SessionModel).order_by(SessionModel.name.asc()).all()
        for server in servers:
            server.running = False  # SQLAlchemy query results lack non-column attributes; add manually as a guard
            self.server_view_dock.addSession(server)
        self.server_view_dock.user_manage_trigger.connect(self.openUserManageTab)
    def actionAbout(self):
        """Show the About dialog."""
        msg = _translate(
            'MainWindow', "<p><b>Hikyuu Strategy Server Manager</b><p>"
            "<p>Hikyuu strategy server management is used to "
            "manage quant trading strategies based on hikyuu "
            "quant framework</p>"
            "<p><b>Hikyuu Quant Framework</b></p>"
            "It is a high performance open source quantitative "
            "trading research framework based on C++/Python, "
            "which is used for stratgy analysis and back testing."
            "Now it only used in Chinese stock market)</p>"
            '<p>see more: <a href="https://hikyuu.org">https://hikyuu.org<a></p>'
        )
        QtWidgets.QMessageBox.about(self, _translate('MainWindow', 'About Hikyuu Strategy Server Manager'), msg)
    def actionChangStyle(self):
        """Switch skin style based on the triggering action's objectName."""
        QtWidgets.qApp.setStyleSheet('')
        style_name = self.sender().objectName()
        if style_name == 'dark_style':
            QtWidgets.qApp.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyqt5'))
        self.ui_config.set('main_window', 'style', style_name)
    def actionNewSession(self):
        """Create a new session via the edit dialog and add it to the tree."""
        server_session = SessionModel()
        session_dialog = HkuEditSessionDialog(self)
        session_dialog.setWindowTitle(_translate("MainWindow", "New Session"))
        session_dialog.setData(server_session)
        # NOTE(review): QDialog.exec() returns 0 on reject, so `>= 0` saves
        # even on cancel unless the dialog uses custom result codes -- confirm.
        if session_dialog.exec() >= 0:
            session_data = session_dialog.getData()
            session_data.save()
            self.server_view_dock.addSession(session_data)
        session_dialog.destroy()
    def actionEditSession(self):
        """Edit the currently selected session in the tree."""
        item = self.server_view_dock.tree.currentItem()
        server_session = self.db.session.query(SessionModel).filter_by(name=item.text(0)).first() if item else None
        if server_session is None:
            QtWidgets.QMessageBox.about(
                self, _translate("MainWindow", "info"), _translate("MainWindow", "Please select a session to execute")
            )
            return
        edit_session_dialog = HkuEditSessionDialog(self)
        edit_session_dialog.setWindowTitle(_translate("MainWindow", "Edit Session"))
        edit_session_dialog.setData(server_session)
        if edit_session_dialog.exec() >= 0:
            session_data = edit_session_dialog.getData()
            session_data.save()
            self.server_view_dock.modifySession(item, session_data)
        edit_session_dialog.destroy()
    def actionDeleteSession(self):
        """Remove the selected session after user confirmation."""
        item = self.server_view_dock.tree.currentItem()
        data = item.data(0, QtCore.Qt.UserRole) if item is not None else None
        if data is None:
            QtWidgets.QMessageBox.about(
                self, _translate("MainWindow", "info"), _translate("MainWindow", "Please select a session to execute")
            )
            return
        ret = QtWidgets.QMessageBox.question(
            self, _translate("MainWindow", "Confirm removal"),
            _translate("MainWindow", "Confirm to remove the session (%s)?") % item.text(0),
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if ret == QtWidgets.QMessageBox.Yes:
            root_index = self.server_view_dock.tree.indexOfTopLevelItem(item)
            self.server_view_dock.tree.takeTopLevelItem(root_index)
            data.delete()
    def actionConnect(self):
        """Probe the selected session's server and update the tree item."""
        item = self.server_view_dock.tree.currentItem()
        if item is None:
            logging.error("Can't get currentItem.")
            return
        session = item.data(0, QtCore.Qt.UserRole)
        # presumably getServerStatus updates session.running -- verify in AssisService
        status, msg = AssisService.getServerStatus(session)
        if not session.running:
            self.server_view_dock.set_gray(item)
            QtWidgets.QMessageBox.warning(
                self, _translate("MainWindow", "info"), _translate("MainWindow", "connection failed")
            )
        else:
            self.server_view_dock.set_default(item)
        self.server_view_dock.tree.viewport().update()
    def closeTab(self, index):
        """Close the tab at *index* and mark its slot as reusable."""
        title = self.main_tab.tabText(index)
        self.main_tab.removeTab(index)
        self.tabs[title] = None
    def openUserManageTab(self, session):
        """Open (or re-open) the user-management tab for *session*."""
        title = "{}({})".format(self.tab_title_user_manage, session.name)
        if title not in self.tabs or self.tabs[title] is None:
            if not session.running:
                QtWidgets.QMessageBox.warning(
                    self, _translate("MainWindow", "info"),
                    _translate("MainWindow", "The server is disconnected. Please connect first!")
                )
            else:
                tab = HkuUserManagerWidget(session, self.main_tab)
                self.main_tab.addTab(tab, title)
                self.tabs[title] = tab
def main_core():
    """Application entry point; re-runs itself when the app requests a restart."""
    FORMAT = '%(asctime)-15s [%(levelname)s]: %(message)s [%(name)s::%(funcName)s]'
    logging.basicConfig(format=FORMAT, level=logging.INFO, handlers=[
        logging.StreamHandler(),
    ])
    # Adapt to high-DPI screens so fonts are not clipped.
    QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
    QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    app = QtWidgets.QApplication(sys.argv)
    main_win = MyMainWindow()
    main_win.show()
    exit_code = app.exec()
    if exit_code == 888:
        # qApp.exit(888) inside the application signals a restart request.
        del main_win
        del app  # required; otherwise the application cannot exit cleanly
        main_core()
    else:
        sys.exit()
# Script entry point.
if __name__ == "__main__":
    main_core()
| 45.872159 | 118 | 0.675543 |
import logging
import sys
import os
from PyQt5.QtWidgets import QVBoxLayout
cur_dir = os.path.dirname(__file__)
sys.path.append(cur_dir)
sys.path.append(os.path.split(cur_dir)[0])
from PyQt5 import QtCore, QtGui, QtWidgets
import qdarkstyle
from UiConfig import UiConfig
from translate import _translate
from widget.HkuSessionViewWidget import HkuSessionViewWidget
from dialog import *
from widget import *
from data import (get_local_db, SessionModel)
from service import AssisService
class MyMainWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
appid = 'HikyuuAdmin'
QtWidgets.QApplication.setApplicationName(appid)
QtWidgets.QApplication.setOrganizationName("org.hikyuu")
loc = QtCore.QLocale()
if loc.language() == QtCore.QLocale.Chinese:
self.trans = QtCore.QTranslator()
self.trans.load("{}/language/zh_CN.qm".format(os.path.dirname(__file__)))
_app = QtWidgets.QApplication.instance()
_app.installTranslator(self.trans)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/logo/logo_16.png"))
icon.addPixmap(QtGui.QPixmap(":/logo/logo_32.png"))
icon.addPixmap(QtGui.QPixmap(":/logo/logo_48.png"))
icon.addPixmap(QtGui.QPixmap(":/logo/logo_64.png"))
icon.addPixmap(QtGui.QPixmap(":/logo/logo_128.png"))
icon.addPixmap(QtGui.QPixmap(":/logo/logo_256.png"))
self.setWindowIcon(icon)
if sys.platform == 'win32':
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appid)
self.ui_config = UiConfig()
self.setObjectName("HikyuuAdminMainWindow")
self.setWindowTitle(_translate("MainWindow", "Hikyuu Strategy Server Manager"))
self.db = get_local_db()
self.initAction()
self.initMenuBar()
self.initMenu()
self.initToolBar()
self.initActionConnect()
self.initMainTabWidget()
self.initDockWidgets()
self.statusBar().showMessage(_translate('MainWindow', 'Running'))
style = self.ui_config.get('main_window', 'style', fallback='normal_style')
if style == 'dark_style':
QtWidgets.qApp.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyqt5'))
if self.ui_config.getboolean('main_window', 'maximized', fallback=False):
self.showMaximized()
else:
self.resize(
self.ui_config.getint('main_window', 'width', fallback=800),
self.ui_config.getint('main_window', 'height', fallback=500)
)
QtCore.QMetaObject.connectSlotsByName(self)
@property
def session(self):
return self.db.session
def closeEvent(self, event):
self.ui_config.save(self)
event.accept()
def initAction(self):
self.action_dict = dict(
action_new_file_session=QtWidgets.QAction(
QtGui.QIcon(":/icon/new_32.png"), _translate("MainWindow", "&New Session"), self
),
action_edit_file_session=QtWidgets.QAction(
QtGui.QIcon(":/icon/edit_32.png"), _translate("MainWindow", "&Edit Session"), self
),
action_del_file_session=QtWidgets.QAction(
QtGui.QIcon(":/icon/cancel_32.png"), _translate("MainWindow", "&Remove Session"), self
),
action_file_connect=QtWidgets.QAction(
QtGui.QIcon(":/icon/connect_32.png"), _translate('MainWindow', '&Connect Now')
),
action_file_quit=QtWidgets.QAction(
QtGui.QIcon(":/icon/quit_32.png"), _translate('MainWindow', '&Quit'), self
),
action_view_normal_style=QtWidgets.QAction(_translate('MainWindow', 'Normal style'), self),
action_view_dark_style=QtWidgets.QAction(_translate('MainWindow', 'Dark style'), self),
action_about=QtWidgets.QAction(_translate('MainWindow', 'About'), self),
action_about_qt=QtWidgets.QAction(_translate('MainWindow', 'About Qt'), self),
)
self.action_dict['action_new_file_session'].setStatusTip(_translate('MainWindow', 'New Session'))
self.action_dict['action_file_connect'].setStatusTip(_translate('MainWindow', 'Connect Now'))
self.action_dict['action_file_quit'].setStatusTip(_translate('MainWindow', 'Quit Application'))
self.action_dict['action_about_qt'].setStatusTip(_translate('MainWindow', "Show the Qt library's About box"))
self.action_dict['action_view_normal_style'].setObjectName('normal_style')
self.action_dict['action_view_normal_style'].setStatusTip(_translate('MainWindow', 'Switch to normal style'))
self.action_dict['action_view_dark_style'].setObjectName('dark_style')
self.action_dict['action_view_dark_style'].setStatusTip(_translate('MainWindow', 'Switch to dark style'))
self.action_dict['action_edit_file_session'].setEnabled(False)
self.action_dict['action_del_file_session'].setEnabled(False)
def initMenuBar(self):
self.menubar_dict = dict(
menu_file=self.menuBar().addMenu(_translate('MainWindow', "&File(F)")),
menu_view=self.menuBar().addMenu(_translate('MainWindow', "&View(V)")),
menu_help=self.menuBar().addMenu(_translate('MainWindow', "&Help(H)"))
)
def initMenu(self):
file_session_menu = self.menubar_dict['menu_file'].addMenu(
QtGui.QIcon(":/icon/server_16.png"), _translate('MainWindow', '&Session Manager')
)
style_menu = self.menubar_dict['menu_view'].addMenu(_translate('MainWindow', 'Skin style'))
self.menu_dict = dict(
menu_file_new_session=file_session_menu.addAction(self.action_dict['action_new_file_session']),
menu_file_edit_session=file_session_menu.addAction(self.action_dict['action_edit_file_session']),
menu_file_del_session=file_session_menu.addAction(self.action_dict['action_del_file_session']),
menu_file_connect=self.menubar_dict['menu_file'].addAction(self.action_dict['action_file_connect']),
menu_file_quit=self.menubar_dict['menu_file'].addAction(self.action_dict['action_file_quit']),
menu_view_normal_style=style_menu.addAction(self.action_dict['action_view_normal_style']),
menu_view_dark_style=style_menu.addAction(self.action_dict['action_view_dark_style']),
menu_about=self.menubar_dict['menu_help'].addAction(self.action_dict['action_about']),
menu_about_qt=self.menubar_dict['menu_help'].addAction(self.action_dict['action_about_qt']),
)
def initToolBar(self):
self.setUnifiedTitleAndToolBarOnMac(True)
file_toolbar = self.addToolBar('File')
file_toolbar.addAction(self.action_dict['action_new_file_session'])
file_toolbar.addAction(self.action_dict['action_edit_file_session'])
file_toolbar.addAction(self.action_dict['action_del_file_session'])
file_toolbar.addAction(self.action_dict['action_file_connect'])
file_toolbar.addAction(self.action_dict['action_file_quit'])
def initActionConnect(self):
self.action_dict['action_new_file_session'].triggered.connect(self.actionNewSession)
self.action_dict['action_edit_file_session'].triggered.connect(self.actionEditSession)
self.action_dict['action_del_file_session'].triggered.connect(self.actionDeleteSession)
self.action_dict['action_file_connect'].triggered.connect(self.actionConnect)
self.action_dict['action_file_quit'].triggered.connect(self.close)
self.action_dict['action_about'].triggered.connect(self.actionAbout)
self.action_dict['action_about_qt'].triggered.connect(QtWidgets.QApplication.aboutQt)
self.action_dict['action_view_normal_style'].triggered.connect(self.actionChangStyle)
self.action_dict['action_view_dark_style'].triggered.connect(self.actionChangStyle)
def initMainTabWidget(self):
self.main_tab = QtWidgets.QTabWidget()
self.setCentralWidget(self.main_tab)
# 设置为可关闭,并连接信号
self.main_tab.setTabsClosable(True)
self.main_tab.tabCloseRequested.connect(self.closeTab)
self.tab_title_user_manage = _translate("MainWindow", "User Manage")
self.tabs = {}
def initDockWidgets(self):
self.server_view_dock = HkuSessionViewWidget(self)
self.server_view_dock.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable) # 禁止关闭
self.server_view_dock.setMinimumWidth(200)
# 消除 docker window 的顶部按钮
title_bar = self.server_view_dock.titleBarWidget()
self.server_view_dock.setTitleBarWidget(QtWidgets.QWidget())
del title_bar
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.server_view_dock)
servers = self.db.session.query(SessionModel).order_by(SessionModel.name.asc()).all()
for server in servers:
server.running = False # SQLalchemy query 出来的对象并没有添加非数据库外的属性,此处手工添加保护
self.server_view_dock.addSession(server)
self.server_view_dock.user_manage_trigger.connect(self.openUserManageTab)
def actionAbout(self):
msg = _translate(
'MainWindow', "<p><b>Hikyuu Strategy Server Manager</b><p>"
"<p>Hikyuu strategy server management is used to "
"manage quant trading strategies based on hikyuu "
"quant framework</p>"
"<p><b>Hikyuu Quant Framework</b></p>"
"It is a high performance open source quantitative "
"trading research framework based on C++/Python, "
"which is used for stratgy analysis and back testing."
"Now it only used in Chinese stock market)</p>"
'<p>see more: <a href="https://hikyuu.org">https://hikyuu.org<a></p>'
)
QtWidgets.QMessageBox.about(self, _translate('MainWindow', 'About Hikyuu Strategy Server Manager'), msg)
def actionChangStyle(self):
QtWidgets.qApp.setStyleSheet('')
style_name = self.sender().objectName()
if style_name == 'dark_style':
QtWidgets.qApp.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyqt5'))
self.ui_config.set('main_window', 'style', style_name)
def actionNewSession(self):
server_session = SessionModel()
session_dialog = HkuEditSessionDialog(self)
session_dialog.setWindowTitle(_translate("MainWindow", "New Session"))
session_dialog.setData(server_session)
if session_dialog.exec() >= 0:
session_data = session_dialog.getData()
session_data.save()
self.server_view_dock.addSession(session_data)
session_dialog.destroy()
def actionEditSession(self):
item = self.server_view_dock.tree.currentItem()
server_session = self.db.session.query(SessionModel).filter_by(name=item.text(0)).first() if item else None
if server_session is None:
QtWidgets.QMessageBox.about(
self, _translate("MainWindow", "info"), _translate("MainWindow", "Please select a session to execute")
)
return
edit_session_dialog = HkuEditSessionDialog(self)
edit_session_dialog.setWindowTitle(_translate("MainWindow", "Edit Session"))
edit_session_dialog.setData(server_session)
if edit_session_dialog.exec() >= 0:
session_data = edit_session_dialog.getData()
session_data.save()
self.server_view_dock.modifySession(item, session_data)
edit_session_dialog.destroy()
def actionDeleteSession(self):
item = self.server_view_dock.tree.currentItem()
data = item.data(0, QtCore.Qt.UserRole) if item is not None else None
if data is None:
QtWidgets.QMessageBox.about(
self, _translate("MainWindow", "info"), _translate("MainWindow", "Please select a session to execute")
)
return
ret = QtWidgets.QMessageBox.question(
self, _translate("MainWindow", "Confirm removal"),
_translate("MainWindow", "Confirm to remove the session (%s)?") % item.text(0),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
)
if ret == QtWidgets.QMessageBox.Yes:
root_index = self.server_view_dock.tree.indexOfTopLevelItem(item)
self.server_view_dock.tree.takeTopLevelItem(root_index)
data.delete()
    def actionConnect(self):
        """Probe the selected session's server and color its tree item by state."""
        item = self.server_view_dock.tree.currentItem()
        if item is None:
            logging.error("Can't get currentItem.")
            return
        session = item.data(0, QtCore.Qt.UserRole)
        # NOTE(review): status/msg are never used below; presumably
        # getServerStatus updates session.running as a side effect --
        # confirm against AssisService before relying on this.
        status, msg = AssisService.getServerStatus(session)
        if not session.running:
            # Server unreachable: gray the item out and warn the user.
            self.server_view_dock.set_gray(item)
            QtWidgets.QMessageBox.warning(
                self, _translate("MainWindow", "info"), _translate("MainWindow", "connection failed")
            )
        else:
            self.server_view_dock.set_default(item)
        # Force an immediate repaint so the color change shows at once.
        self.server_view_dock.tree.viewport().update()
    def closeTab(self, index):
        """Remove the tab at *index* and tombstone its registry entry.

        The entry in self.tabs is set to None rather than deleted, so that
        openUserManageTab (which checks for a None entry) can recreate the
        widget under the same title later.
        """
        title = self.main_tab.tabText(index)
        self.main_tab.removeTab(index)
        self.tabs[title] = None
def openUserManageTab(self, session):
title = "{}({})".format(self.tab_title_user_manage, session.name)
if title not in self.tabs or self.tabs[title] is None:
if not session.running:
QtWidgets.QMessageBox.warning(
self, _translate("MainWindow", "info"),
_translate("MainWindow", "The server is disconnected. Please connect first!")
)
else:
tab = HkuUserManagerWidget(session, self.main_tab)
self.main_tab.addTab(tab, title)
self.tabs[title] = tab
def main_core():
    """Configure logging, run the Qt application, and restart it on exit code 888.

    Exit code 888 is the app's "restart requested" sentinel: the window and
    QApplication are torn down and rebuilt.  Any other exit code terminates
    the process via sys.exit() (status 0, matching historical behaviour).
    """
    FORMAT = '%(asctime)-15s [%(levelname)s]: %(message)s [%(name)s::%(funcName)s]'
    logging.basicConfig(format=FORMAT, level=logging.INFO, handlers=[
        logging.StreamHandler(),
    ])
    # High-DPI attributes must be set before the QApplication is created.
    QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
    QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    # Loop instead of recursing into main_core(): repeated restarts no
    # longer grow the call stack.
    while True:
        app = QtWidgets.QApplication(sys.argv)
        main_win = MyMainWindow()
        main_win.show()
        exit_code = app.exec()
        # Release the old window/application before a possible rebuild.
        del main_win
        del app
        if exit_code != 888:
            sys.exit()
# Script entry point: start the Qt application only when run directly.
if __name__ == "__main__":
    main_core()
| true | true |
f724ce0ba695747e01f48c7c55ced6477e36a6ed | 823 | py | Python | python/recursion/fibonacci.py | suddi/coding-challenges | f31b53790084dce1ad0be65ec1d61bf177cddb39 | [
"MIT"
] | null | null | null | python/recursion/fibonacci.py | suddi/coding-challenges | f31b53790084dce1ad0be65ec1d61bf177cddb39 | [
"MIT"
] | 11 | 2020-01-09T06:53:45.000Z | 2022-02-11T01:34:44.000Z | python/recursion/fibonacci.py | suddi/coding-challenges | f31b53790084dce1ad0be65ec1d61bf177cddb39 | [
"MIT"
] | 1 | 2017-03-18T17:19:43.000Z | 2017-03-18T17:19:43.000Z | def solution(number): # O(N)
"""
Write a function to compute the fibonacci sequence value to the requested iteration.
>>> solution(3)
2
>>> solution(10)
55
>>> solution(20)
6765
"""
m = {
0: 0,
1: 1
} # O(1)
def run_sequence(n): # O(N)
if not isinstance(m.get(n), int): # O(1)
m[n] = run_sequence(n - 1) + run_sequence(n - 2) # O(N)
return m[n] # O(1)
return run_sequence(number) # O(N)
if __name__ == '__main__':
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 30.481481 | 88 | 0.351154 | def solution(number):
m = {
0: 0,
1: 1
}
def run_sequence(n):
if not isinstance(m.get(n), int):
m[n] = run_sequence(n - 1) + run_sequence(n - 2)
return m[n]
return run_sequence(number)
if __name__ == '__main__':
import doctest
doctest.testmod()
| true | true |
f724cf0a2f8b0040903ca54f7bc46584d2243ff1 | 33 | py | Python | lib/python3.4/_bootlocale.py | caiocsalvador/whats_the_craic | c49ef62f1acd7379f6fd90c2b93aa1fa00c8661d | [
"MIT"
] | 7 | 2017-04-26T12:28:22.000Z | 2021-02-09T18:59:50.000Z | django-ng/lib/python3.4/_bootlocale.py | Arsalen/BusinessStrategies | 209e57340359af3ea063c064982198848dc36c5f | [
"MIT"
] | 13 | 2015-12-04T03:38:37.000Z | 2015-12-12T00:15:46.000Z | django-ng/lib/python3.4/_bootlocale.py | Arsalen/BusinessStrategies | 209e57340359af3ea063c064982198848dc36c5f | [
"MIT"
] | 8 | 2017-06-01T08:42:16.000Z | 2020-07-23T12:30:19.000Z | /usr/lib/python3.4/_bootlocale.py | 33 | 33 | 0.818182 | /usr/lib/python3.4/_bootlocale.py | false | true |
f724cf25e6669a9f5102947f3cef81489c325e8c | 24,857 | py | Python | pikciosdk/PikcioChain.py | Pikciochain/PikcioChainSDK | 2a89b655268516060044ec51672c0cc46f44bd9b | [
"MIT"
] | 1 | 2019-04-11T06:24:40.000Z | 2019-04-11T06:24:40.000Z | pikciosdk/PikcioChain.py | Pikciochain/PythonSDK | 2a89b655268516060044ec51672c0cc46f44bd9b | [
"MIT"
] | 3 | 2018-10-26T08:52:10.000Z | 2018-10-26T08:55:38.000Z | pikciosdk/PikcioChain.py | Pikciochain/PythonSDK | 2a89b655268516060044ec51672c0cc46f44bd9b | [
"MIT"
] | null | null | null | import base64
import functools
import json
import os
import time

import requests
from flask import Flask, jsonify, abort, make_response, redirect, request, \
    url_for
from flask_oauthlib.client import OAuth
from selenium import webdriver

from config import get_config
from log import Logger
access_token = ''
def init_api_client():
    """
    Initialize Flask API Client.

    This is necessary for the grant-code method: it builds a small Flask app
    exposing /api/authorized, the OAuth2 redirect endpoint that exchanges
    the authorization code for an access token (stored in the module-level
    ``access_token``).  Returns the Flask app, or a JSON error string when
    the config file is unusable.
    """
    log = Logger()
    config = get_config()
    app_name = config.get('application', 'name')
    app = Flask('{0}_api_client'.format(app_name), template_folder='templates')
    os.environ['DEBUG'] = 'true'
    try:
        client_id = config.get('api_client', 'client_id')
        client_secret = config.get('api_client', 'client_secret')
        public_ip_server = config.get('server', 'public_ip')
        public_port_server = config.get('server', 'public_port')
        # Fixed copy-paste bug: these previously re-read the *public* keys,
        # so the private-address fallback below always retried the same host.
        private_ip_server = config.get('server', 'private_ip')
        private_port_server = config.get('server', 'private_port')
        # Fixed swapped accessors: 'tls' is the boolean flag (as read by
        # ClientAPI.__init__), while 'redirect_uri' is a plain string.
        https = config.getboolean('server', 'tls')
        redirect_uri = config.get('server', 'redirect_uri')
    except Exception as e:
        log.error('init_api_client Exception : {0}'.format(e))
        return json.dumps("Invalid config file")

    @app.route('/api/authorized')
    def grant_code():
        """OAuth2 redirect endpoint: swap the authorization code for a token."""
        try:
            global access_token
            code = request.args.get('code')
            data = {
                'grant_type': 'authorization_code',
                'client_id': client_id,
                'client_secret': client_secret,
                'code': code,
                'redirect_uri': redirect_uri
            }
            scheme = 'https' if https else 'http'
            token_url = '{0}://{1}:{2}/oauth/token'.format(
                scheme, public_ip_server, public_port_server)
            p = requests.post(url=token_url, data=data, verify=False)
            access_token = p.json().get('access_token')
            if not access_token:
                # The public address yielded no token: retry on the
                # private address.
                token_url = '{0}://{1}:{2}/oauth/token'.format(
                    scheme, private_ip_server, private_port_server)
                p = requests.post(url=token_url, data=data, verify=False)
                access_token = p.json().get('access_token')
            return access_token
        except Exception as ex:
            log.error('init_api_client Exception : {0}'.format(ex))
            return json.dumps("Invalid config file")
    return app
class ClientAPI:
"""
Class access for python Client API
"""
def __init__(self, username=None, password=None):
config = get_config()
self.api_public_ip = config.get('server', 'public_ip')
self.api_public_port = config.get('server', 'public_port')
self.api_private_ip = config.get('server', 'private_ip')
self.api_private_port = config.get('server', 'private_port')
self.client_id = config.get('api_client', 'client_id')
self.client_secret = config.get('api_client', 'client_secret')
self.scope = config.get('api_client', 'scope')
self.method = config.get('api_client', 'auth_type')
self.https = config.getboolean('server', 'tls')
self.username = username
self.password = password
self.log = Logger(system=self)
self.app_name = config.get('application', 'name')
self.app = Flask('{0}_api_client'.format(self.app_name))
self.oauth = OAuth(self.app)
os.environ['DEBUG'] = 'true'
if self.https:
self.api_base_url = 'https://{0}:{1}/api/'.format(
self.api_public_ip, self.api_public_port)
self.access_token_url = 'https://{0}:{1}/oauth/token'.format(
self.api_public_ip, self.api_public_port)
self.authorize_url = 'https://{0}:{1}/oauth/authorize'.format(
self.api_public_ip, self.api_public_port)
else:
self.api_base_url = 'http://{0}:{1}/api/'.format(
self.api_public_ip, self.api_public_port)
self.access_token_url = 'http://{0}:{1}/oauth/token'.format(
self.api_public_ip, self.api_public_port)
self.authorize_url = 'http://{0}:{1}/oauth/authorize'.format(
self.api_public_ip, self.api_public_port)
self.remote = self.oauth.remote_app(
'remote',
consumer_key=self.client_id,
consumer_secret=self.client_secret,
request_token_params={'scope': self.scope},
base_url=self.api_base_url,
request_token_url=None,
access_token_url=self.access_token_url,
authorize_url=self.authorize_url
)
self.remote_oauth = ''
self.access_token = ''
self.refresh_token = ''
self.retries = 0
self.req_initiator_url = ''
self.web_server = ''
"""
Everything related to API connection
"""
    def get_oauth_token(self):
        """Return the cached (access_token, refresh_token) tuple, or '' if unset."""
        return self.remote_oauth
def refresh_tok(self):
token = self.get_oauth_token()
if token == '' or token[1] == '':
return self.authorize()
data = {
'grant_type': 'refresh_token',
'client_id': self.client_id,
'refresh_token': token[1],
'scope': self.scope,
'client_secret': self.client_secret,
'username': self.username,
'password': self.password
}
auth_code = base64.b64encode(
'{0}:{1}'.format(self.client_id, self.client_secret))
res = requests.post(self.access_token_url, data=data, headers={
'Authorization': 'Basic {0}'.format(auth_code)},
verify=False)
if res.status_code == 401:
self.remote_oauth = ''
return self.authorize()
if res.status_code in (200, 201):
self.remote_oauth = (
res.json().get('access_token'),
res.json().get('refresh_token'))
self.access_token = res.json().get('access_token')
self.refresh_token = res.json().get('refresh_token')
return True
return False
def require_authorize(self, f):
"""
Decorator used to validate client authorization; In case the client
is not authorized, redirect to the Authorize Page, otherwise check
if the access token expired and request new one using the refresh
token.
:return:
"""
def wrap(*args, **kwargs):
token = self.get_oauth_token()
if not token:
self.req_initiator_url = '/api'
return redirect('/authorize')
resp = f(*args, **kwargs)
if not resp.status or resp.status in (401,):
token = self.get_oauth_token()
if token and token[1]:
self.refresh_tok()
else:
return redirect('/authorize')
resp = f(*args, **kwargs)
return make_response(jsonify(resp.data), resp.status)
return wrap
    def authorize(self):
        """Kick off the OAuth authorize flow; redirect straight to /api when
        already authorised (remote_oauth non-empty)."""
        if self.remote_oauth != '':
            return redirect(url_for('api_index'))
        # Remember where the user came from so the callback can return there.
        next_url = request.args.get('next') or request.referrer or None
        return self.remote.authorize(
            callback=url_for('authorized', next=next_url, _external=True)
        )
    def authorized(self):
        """OAuth callback: store the received access/refresh tokens.

        Returns a JSON error payload when the provider reported an error,
        aborts with 401 when tokens are missing, otherwise redirects to the
        page that initiated the flow (if recorded) or to /api.
        """
        resp = self.remote.authorized_response()
        if not resp:
            return jsonify(
                error=request.args.get('error'),
                message=request.args.get('error_description') or ''
            )
        elif hasattr(resp, 'data') and resp.data.get('error'):
            return jsonify(
                error=resp.data['error'],
                message=resp.message or ''
            )
        if not resp.get('access_token') or not resp.get('refresh_token'):
            abort(401)
        self.refresh_token = resp['refresh_token']
        self.access_token = resp['access_token']
        # NOTE(review): self.remote_oauth is not updated here, so
        # get_oauth_token() still reports the pre-callback state -- confirm
        # whether that is intentional.
        if self.req_initiator_url != '':
            req_initiator = self.req_initiator_url
            return redirect(req_initiator)
        return redirect('/api')
    def deauthorize(self):
        """Forget cached OAuth tokens (only when remote_oauth is set) and
        redirect the client back to /authorize."""
        if self.remote_oauth != '':
            self.remote_oauth = ''
            self.refresh_token = ''
            self.access_token = ''
        return redirect(url_for('authorize'))
    def api_index(self):
        """Fetch the API landing page ('home') through the OAuth remote app."""
        resp = self.remote.get('home')
        return resp
def generic_request(self, url, method, params=None):
global access_token
try:
# if we used grant_code method, the access token variable of the
# class won't be initialised yet
if self.access_token == '':
# if the access token hasn't been got yet, we wait 5s and call
# the function again until the global variable isn't null
# anymore
if access_token != '':
self.access_token = access_token
else:
self.retries += 1
if self.retries == 3:
self.retries = 0
p = jsonify({
'error': 'Too many failed attempts to retrieve '
'access token, please try the password '
'method.'})
return p
time.sleep(5)
return self.generic_request(url, method, params)
if method.lower() == 'get':
p = requests.get(
url=url + '?access_token=' + self.access_token,
verify=False)
elif method.lower() == 'post':
p = requests.post(
url=url + '?access_token=' + self.access_token,
data=params,
headers={'Content-Type': 'application/json'}, verify=False)
elif method.lower() == 'delete':
p = requests.delete(
url=url + '?access_token=' + self.access_token,
data=params, verify=False)
else:
p = json.dumps('Bad request')
if p.status_code == 401 and self.retries < 1:
if self.refresh_tok():
self.retries += 1
if method.lower() == 'get':
p = requests.get(
url=url + '?access_token=' + self.access_token,
verify=False)
elif method.lower() == 'post':
p = requests.post(
url=url + '?access_token=' + self.access_token,
data=params,
headers={'Content-Type': 'application/json'},
verify=False)
elif method.lower() == 'delete':
p = requests.delete(
url=url + '?access_token=' + self.access_token,
data=params, verify=False)
else:
p = json.dumps('API connexion lost')
elif p.status_code == 500:
self.log.error('Server connexion error : {0}'.format(p))
return json.dumps('Server failure, please report the bug')
else:
self.retries = 0
except Exception as e:
self.log.error('generic_request Exception : {0}'.format(e))
return json.dumps('Bad request')
return p
def get_access_token(self):
try:
if self.method.lower() == 'password_header':
data = {
'grant_type': 'password',
'username': self.username,
'password': self.password,
'scope': self.scope
}
auth_code = base64.b64encode(
bytes(self.client_id + ':' + self.client_secret))
try:
p = requests.post(url=self.access_token_url, data=data,
headers={
'Authorization': 'Basic {0}'.format(
auth_code)}, verify=False,
timeout=10)
except (requests.Timeout, requests.ConnectionError):
# try with private IP
self.log.error('Failed to connect public IP, try to '
'connect private IP')
base_url = 'http{0}://{1}:{2}/'.format(
's' if self.https else '',
self.api_private_ip,
self.api_private_port
)
self.api_base_url = base_url + 'api/'
self.access_token_url = base_url + 'oauth/token'
self.authorize_url = base_url + 'oauth/authorize'
p = requests.post(url=self.access_token_url, data=data,
headers={
'Authorization': 'Basic {0}'.format(
auth_code)}, verify=False,
timeout=10)
if p and p.status_code == 401:
return json.dumps(
{'status': False, 'msg': 'API authentication failed'})
else:
self.access_token = p.json().get('access_token')
self.refresh_token = p.json().get('refresh_token')
self.remote_oauth = (self.access_token, self.refresh_token)
return json.dumps(
{'status': True, 'msg': 'API access granted'})
elif self.method.lower() == 'password_data':
data = {
'grant_type': 'password',
'client_id': self.client_id,
'client_secret': self.client_secret,
'username': self.username,
'password': self.password,
'scope': self.scope
}
try:
p = requests.post(url=self.access_token_url, data=data,
verify=False, timeout=10)
except (requests.Timeout, requests.ConnectionError):
# try with private IP
self.log.error(
'Failed to connect public IP, try to connect private '
'IP')
base_url = 'http{0}://{1}:{2}/'.format(
's' if self.https else '',
self.api_private_ip,
self.api_private_port
)
self.api_base_url = base_url + 'api/'
self.access_token_url = base_url + 'oauth/token'
self.authorize_url = base_url + 'oauth/authorize'
p = requests.post(url=self.access_token_url, data=data,
verify=False, timeout=10)
if p.status_code == 401:
return json.dumps(
{'status': False, 'msg': 'API authentication failed'})
else:
self.access_token = p.json().get('access_token')
self.refresh_token = p.json().get('refresh_token')
self.remote_oauth = (self.access_token, self.refresh_token)
return json.dumps(
{'status': True, 'msg': 'API access granted'})
# todo : to be tested + manage https and private/public IP address
elif self.method.lower() == "grant_code":
url = self.authorize_url + '?client_id=' + self.client_id + \
"&response_type=code"
driver = webdriver.Firefox()
return driver.get(url)
else:
return json.dumps(
{'status': False, 'msg': 'Invalid grant type'})
except Exception as e:
self.log.error('get_access_token Exception : {0}'.format(e))
return json.dumps(
{'status': False, 'msg': 'API authentication failed'})
"""
Everything related to the user
"""
def get_user_profile(self):
try:
p = self.generic_request(url=self.api_base_url + 'user/profile',
method='GET')
except Exception as e:
self.log.error('get_user_profile Exception : {0}'.format(e))
return json.dumps('Get user profile : Bad request')
return p
def update_user_profile(self, data):
try:
p = self.generic_request(url=self.api_base_url + 'user/profile',
method='POST', params=json.dumps(data))
except Exception as e:
self.log.error('update_user_profile Exception : {0}'.format(e))
return json.dumps('Update user profile : Bad request')
return p
def delete_custom_profile_item(self, data):
try:
p = self.generic_request(
url=self.api_base_url + 'user/profile/delete_item',
method='POST', params=json.dumps(data))
except Exception as e:
self.log.error(
'delete_custom_profile_item Exception : {0}'.format(e))
return json.dumps('Delete custom profile item : Bad request')
return p
def get_user_avatar(self):
try:
p = self.generic_request(url=self.api_base_url + 'user/avatar',
method='GET')
except Exception as e:
self.log.error('get_user_avatar Exception : {0}'.format(e))
return json.dumps('Get user avatar : Bad request')
return p
def set_user_avatar(self, data):
try:
p = self.generic_request(url=self.api_base_url + 'user/avatar',
method='POST', params=json.dumps(data))
except Exception as e:
self.log.error('set_user_avatar Exception : {0}'.format(e))
return json.dumps('Update user avatar : Bad request')
return p
def update_password(self, data):
try:
p = self.generic_request(
url=self.api_base_url + 'profile/change_password',
method='POST',
params=json.dumps(data))
except Exception as e:
self.log.error('update_password Exception : {0}'.format(e))
return json.dumps('Update password : Bad request')
return p
"""
Everything related to chat messages
"""
def send_chat_message(self, data):
try:
p = self.generic_request(url=self.api_base_url + 'chat/send',
method="POST", params=json.dumps(data))
except Exception as e:
self.log.error('send_chat_message Exception : {0}'.format(e))
return json.dumps('Send chat message : Bad request')
return p
def delete_chat_message(self, msg_id):
try:
p = self.generic_request(url=self.api_base_url + 'chat/' + msg_id,
method="DELETE")
except Exception as e:
self.log.error('delete_chat_message Exception : {0}'.format(e))
return json.dumps('Delete chat message : Bad request')
return p
def get_chat_conversation(self, data):
try:
p = self.generic_request(url=self.api_base_url + 'chat',
method='POST', params=json.dumps(data))
except Exception as e:
self.log.error('get_chat_conversation Exception : {0}'.format(e))
return json.dumps('Get chat conversation : Bad request')
return p
"""
Everything related to file messages
"""
def get_file_messages(self, data):
try:
p = self.generic_request(url=self.api_base_url + 'file_message',
method='POST', params=json.dumps(data))
except Exception as e:
self.log.error('get_file_messages Exception : {0}'.format(e))
return json.dumps('Get file messages : Bad request')
return p
def send_file_message(self, data):
try:
p = self.generic_request(
url=self.api_base_url + 'file_message/send',
method="POST",
params=json.dumps(data))
except Exception as e:
self.log.error('send_file_message Exception : {0}'.format(e))
return json.dumps('Send file message : Bad request')
return p
def delete_file_message(self, msg_id):
try:
p = self.generic_request(
url=self.api_base_url + 'file_message/' + msg_id,
method='DELETE')
except Exception as e:
self.log.error('delete_file_message Exception : {0}'.format(e))
return json.dumps('Set file message as read : Bad request')
return p
"""
Everything related to contacts
"""
def get_contacts(self):
try:
p = self.generic_request(url=self.api_base_url + 'contacts',
method='GET')
except Exception as e:
self.log.error('get_contacts Exception : {0}'.format(e))
return json.dumps(
'Get contacts list : Bad request : {0}'.format(e))
return p
def find_user(self, query):
try:
p = self.generic_request(
url=self.api_base_url + 'contacts/find_user' + query,
method='GET')
except Exception as e:
self.log.error('find_user Exception : {0}'.format(e))
return json.dumps('Find user : Bad request')
return p
def add_contact(self, data):
try:
p = self.generic_request(url=self.api_base_url + 'contacts/add',
method='POST', params=json.dumps(data))
except Exception as e:
self.log.error('add_contact Exception : {0}'.format(e))
return json.dumps('Add contact : Bad request')
return p
def remove_contact(self, data):
try:
p = self.generic_request(url=self.api_base_url + 'contacts/remove',
method='POST', params=json.dumps(data))
except Exception as e:
self.log.error('remove_contact Exception : {0}'.format(e))
return json.dumps('Remove contact : Bad request')
return p
def accept_contact_request(self, matr_id):
try:
p = self.generic_request(
url=self.api_base_url + 'contacts/accept/' + matr_id,
method='GET')
except Exception as e:
self.log.error('accept_contact_request Exception : {0}'.format(e))
return json.dumps('Accept contact request : Bad request')
return p
def reject_contact_request(self, matr_id):
try:
p = self.generic_request(
url=self.api_base_url + 'contacts/reject/' + matr_id,
method='GET')
except Exception as e:
self.log.error('reject_contact_request Exception : {0}'.format(e))
return json.dumps('Reject contact request : Bad request')
return p
def get_contact_profile(self, matr_id):
try:
p = self.generic_request(
url=self.api_base_url + 'contacts/' + matr_id, method='GET')
except Exception as e:
self.log.error('get_contact_profile Exception : {0}'.format(e))
return json.dumps('Get contact profile : Bad request')
return p
| 39.083333 | 79 | 0.518968 | import base64
import json
import os
import requests
import time
from flask import Flask, jsonify, abort, make_response, redirect, request, \
url_for
from flask_oauthlib.client import OAuth
from selenium import webdriver
from config import get_config
from log import Logger
access_token = ''
def init_api_client():
log = Logger()
config = get_config()
app_name = config.get('application', 'name')
app = Flask('{0}_api_client'.format(app_name), template_folder='templates')
os.environ['DEBUG'] = 'true'
try:
client_id = config.get('api_client', 'client_id')
client_secret = config.get('api_client', 'client_secret')
public_ip_server = config.get('server', 'public_ip')
public_port_server = config.get('server', 'public_port')
private_ip_server = config.get('server', 'public_ip')
private_port_server = config.get('server', 'public_port')
https = config.get('server', 'tls')
redirect_uri = config.getboolean('server', 'redirect_uri')
except Exception as e:
log.error('init_api_client Exception : {0}'.format(e))
return json.dumps("Invalid config file")
@app.route('/api/authorized')
def grant_code():
try:
global access_token
code = request.args.get('code')
data = {
'grant_type': 'authorization_code',
'client_id': client_id,
'client_secret': client_secret,
'code': code,
'redirect_uri': redirect_uri
}
if https:
p = requests.post(
url='https://' + public_ip_server + ':' +
public_port_server + '/oauth/token',
data=data, verify=False)
else:
p = requests.post(
url='http://' + public_ip_server + ':' +
public_port_server + '/oauth/token',
data=data, verify=False)
access_token = p.json().get('access_token')
if not access_token:
if https:
p = requests.post(
url='https://' + private_ip_server + ':' +
private_port_server + '/oauth/token',
data=data, verify=False)
else:
p = requests.post(
url='http://' + private_ip_server + ':' +
private_port_server + '/oauth/token',
data=data, verify=False)
access_token = p.json().get('access_token')
return access_token
except Exception as ex:
log.error('init_api_client Exception : {0}'.format(ex))
return json.dumps("Invalid config file")
return app
class ClientAPI:
def __init__(self, username=None, password=None):
config = get_config()
self.api_public_ip = config.get('server', 'public_ip')
self.api_public_port = config.get('server', 'public_port')
self.api_private_ip = config.get('server', 'private_ip')
self.api_private_port = config.get('server', 'private_port')
self.client_id = config.get('api_client', 'client_id')
self.client_secret = config.get('api_client', 'client_secret')
self.scope = config.get('api_client', 'scope')
self.method = config.get('api_client', 'auth_type')
self.https = config.getboolean('server', 'tls')
self.username = username
self.password = password
self.log = Logger(system=self)
self.app_name = config.get('application', 'name')
self.app = Flask('{0}_api_client'.format(self.app_name))
self.oauth = OAuth(self.app)
os.environ['DEBUG'] = 'true'
if self.https:
self.api_base_url = 'https://{0}:{1}/api/'.format(
self.api_public_ip, self.api_public_port)
self.access_token_url = 'https://{0}:{1}/oauth/token'.format(
self.api_public_ip, self.api_public_port)
self.authorize_url = 'https://{0}:{1}/oauth/authorize'.format(
self.api_public_ip, self.api_public_port)
else:
self.api_base_url = 'http://{0}:{1}/api/'.format(
self.api_public_ip, self.api_public_port)
self.access_token_url = 'http://{0}:{1}/oauth/token'.format(
self.api_public_ip, self.api_public_port)
self.authorize_url = 'http://{0}:{1}/oauth/authorize'.format(
self.api_public_ip, self.api_public_port)
self.remote = self.oauth.remote_app(
'remote',
consumer_key=self.client_id,
consumer_secret=self.client_secret,
request_token_params={'scope': self.scope},
base_url=self.api_base_url,
request_token_url=None,
access_token_url=self.access_token_url,
authorize_url=self.authorize_url
)
self.remote_oauth = ''
self.access_token = ''
self.refresh_token = ''
self.retries = 0
self.req_initiator_url = ''
self.web_server = ''
def get_oauth_token(self):
return self.remote_oauth
def refresh_tok(self):
token = self.get_oauth_token()
if token == '' or token[1] == '':
return self.authorize()
data = {
'grant_type': 'refresh_token',
'client_id': self.client_id,
'refresh_token': token[1],
'scope': self.scope,
'client_secret': self.client_secret,
'username': self.username,
'password': self.password
}
auth_code = base64.b64encode(
'{0}:{1}'.format(self.client_id, self.client_secret))
res = requests.post(self.access_token_url, data=data, headers={
'Authorization': 'Basic {0}'.format(auth_code)},
verify=False)
if res.status_code == 401:
self.remote_oauth = ''
return self.authorize()
if res.status_code in (200, 201):
self.remote_oauth = (
res.json().get('access_token'),
res.json().get('refresh_token'))
self.access_token = res.json().get('access_token')
self.refresh_token = res.json().get('refresh_token')
return True
return False
def require_authorize(self, f):
def wrap(*args, **kwargs):
token = self.get_oauth_token()
if not token:
self.req_initiator_url = '/api'
return redirect('/authorize')
resp = f(*args, **kwargs)
if not resp.status or resp.status in (401,):
token = self.get_oauth_token()
if token and token[1]:
self.refresh_tok()
else:
return redirect('/authorize')
resp = f(*args, **kwargs)
return make_response(jsonify(resp.data), resp.status)
return wrap
def authorize(self):
if self.remote_oauth != '':
return redirect(url_for('api_index'))
next_url = request.args.get('next') or request.referrer or None
return self.remote.authorize(
callback=url_for('authorized', next=next_url, _external=True)
)
def authorized(self):
resp = self.remote.authorized_response()
if not resp:
return jsonify(
error=request.args.get('error'),
message=request.args.get('error_description') or ''
)
elif hasattr(resp, 'data') and resp.data.get('error'):
return jsonify(
error=resp.data['error'],
message=resp.message or ''
)
if not resp.get('access_token') or not resp.get('refresh_token'):
abort(401)
self.refresh_token = resp['refresh_token']
self.access_token = resp['access_token']
if self.req_initiator_url != '':
req_initiator = self.req_initiator_url
return redirect(req_initiator)
return redirect('/api')
def deauthorize(self):
if self.remote_oauth != '':
self.remote_oauth = ''
self.refresh_token = ''
self.access_token = ''
return redirect(url_for('authorize'))
def api_index(self):
resp = self.remote.get('home')
return resp
def generic_request(self, url, method, params=None):
global access_token
try:
if self.access_token == '':
# if the access token hasn't been got yet, we wait 5s and call
# anymore
if access_token != '':
self.access_token = access_token
else:
self.retries += 1
if self.retries == 3:
self.retries = 0
p = jsonify({
'error': 'Too many failed attempts to retrieve '
'access token, please try the password '
'method.'})
return p
time.sleep(5)
return self.generic_request(url, method, params)
if method.lower() == 'get':
p = requests.get(
url=url + '?access_token=' + self.access_token,
verify=False)
elif method.lower() == 'post':
p = requests.post(
url=url + '?access_token=' + self.access_token,
data=params,
headers={'Content-Type': 'application/json'}, verify=False)
elif method.lower() == 'delete':
p = requests.delete(
url=url + '?access_token=' + self.access_token,
data=params, verify=False)
else:
p = json.dumps('Bad request')
if p.status_code == 401 and self.retries < 1:
if self.refresh_tok():
self.retries += 1
if method.lower() == 'get':
p = requests.get(
url=url + '?access_token=' + self.access_token,
verify=False)
elif method.lower() == 'post':
p = requests.post(
url=url + '?access_token=' + self.access_token,
data=params,
headers={'Content-Type': 'application/json'},
verify=False)
elif method.lower() == 'delete':
p = requests.delete(
url=url + '?access_token=' + self.access_token,
data=params, verify=False)
else:
p = json.dumps('API connexion lost')
elif p.status_code == 500:
self.log.error('Server connexion error : {0}'.format(p))
return json.dumps('Server failure, please report the bug')
else:
self.retries = 0
except Exception as e:
self.log.error('generic_request Exception : {0}'.format(e))
return json.dumps('Bad request')
return p
def get_access_token(self):
try:
if self.method.lower() == 'password_header':
data = {
'grant_type': 'password',
'username': self.username,
'password': self.password,
'scope': self.scope
}
auth_code = base64.b64encode(
bytes(self.client_id + ':' + self.client_secret))
try:
p = requests.post(url=self.access_token_url, data=data,
headers={
'Authorization': 'Basic {0}'.format(
auth_code)}, verify=False,
timeout=10)
except (requests.Timeout, requests.ConnectionError):
# try with private IP
self.log.error('Failed to connect public IP, try to '
'connect private IP')
base_url = 'http{0}://{1}:{2}/'.format(
's' if self.https else '',
self.api_private_ip,
self.api_private_port
)
self.api_base_url = base_url + 'api/'
self.access_token_url = base_url + 'oauth/token'
self.authorize_url = base_url + 'oauth/authorize'
p = requests.post(url=self.access_token_url, data=data,
headers={
'Authorization': 'Basic {0}'.format(
auth_code)}, verify=False,
timeout=10)
if p and p.status_code == 401:
return json.dumps(
{'status': False, 'msg': 'API authentication failed'})
else:
self.access_token = p.json().get('access_token')
self.refresh_token = p.json().get('refresh_token')
self.remote_oauth = (self.access_token, self.refresh_token)
return json.dumps(
{'status': True, 'msg': 'API access granted'})
elif self.method.lower() == 'password_data':
data = {
'grant_type': 'password',
'client_id': self.client_id,
'client_secret': self.client_secret,
'username': self.username,
'password': self.password,
'scope': self.scope
}
try:
p = requests.post(url=self.access_token_url, data=data,
verify=False, timeout=10)
except (requests.Timeout, requests.ConnectionError):
# try with private IP
self.log.error(
'Failed to connect public IP, try to connect private '
'IP')
base_url = 'http{0}://{1}:{2}/'.format(
's' if self.https else '',
self.api_private_ip,
self.api_private_port
)
self.api_base_url = base_url + 'api/'
self.access_token_url = base_url + 'oauth/token'
self.authorize_url = base_url + 'oauth/authorize'
p = requests.post(url=self.access_token_url, data=data,
verify=False, timeout=10)
if p.status_code == 401:
return json.dumps(
{'status': False, 'msg': 'API authentication failed'})
else:
self.access_token = p.json().get('access_token')
self.refresh_token = p.json().get('refresh_token')
self.remote_oauth = (self.access_token, self.refresh_token)
return json.dumps(
{'status': True, 'msg': 'API access granted'})
# todo : to be tested + manage https and private/public IP address
elif self.method.lower() == "grant_code":
url = self.authorize_url + '?client_id=' + self.client_id + \
"&response_type=code"
driver = webdriver.Firefox()
return driver.get(url)
else:
return json.dumps(
{'status': False, 'msg': 'Invalid grant type'})
except Exception as e:
self.log.error('get_access_token Exception : {0}'.format(e))
return json.dumps(
{'status': False, 'msg': 'API authentication failed'})
def get_user_profile(self):
    """Fetch the current user's profile from the remote API.

    Returns the raw response from ``generic_request`` on success, or a
    JSON-encoded error string when the request raises.
    """
    try:
        response = self.generic_request(method='GET',
                                        url=self.api_base_url + 'user/profile')
    except Exception as e:
        self.log.error('get_user_profile Exception : {0}'.format(e))
        return json.dumps('Get user profile : Bad request')
    else:
        return response
def update_user_profile(self, data):
    """POST profile changes for the current user.

    Args:
        data: JSON-serializable payload describing the profile fields.
    """
    try:
        response = self.generic_request(method='POST',
                                        params=json.dumps(data),
                                        url=self.api_base_url + 'user/profile')
    except Exception as e:
        self.log.error('update_user_profile Exception : {0}'.format(e))
        return json.dumps('Update user profile : Bad request')
    else:
        return response
def delete_custom_profile_item(self, data):
    """Remove a custom item from the user's profile.

    Args:
        data: JSON-serializable payload identifying the item to delete.
    """
    try:
        response = self.generic_request(
            method='POST',
            params=json.dumps(data),
            url=self.api_base_url + 'user/profile/delete_item')
    except Exception as e:
        self.log.error(
            'delete_custom_profile_item Exception : {0}'.format(e))
        return json.dumps('Delete custom profile item : Bad request')
    else:
        return response
def get_user_avatar(self):
    """Fetch the current user's avatar from the remote API."""
    try:
        response = self.generic_request(method='GET',
                                        url=self.api_base_url + 'user/avatar')
    except Exception as e:
        self.log.error('get_user_avatar Exception : {0}'.format(e))
        return json.dumps('Get user avatar : Bad request')
    else:
        return response
def set_user_avatar(self, data):
    """Upload a new avatar for the current user.

    Args:
        data: JSON-serializable payload carrying the avatar content.
    """
    try:
        response = self.generic_request(method='POST',
                                        params=json.dumps(data),
                                        url=self.api_base_url + 'user/avatar')
    except Exception as e:
        self.log.error('set_user_avatar Exception : {0}'.format(e))
        return json.dumps('Update user avatar : Bad request')
    else:
        return response
def update_password(self, data):
    """Change the current user's password.

    Args:
        data: JSON-serializable payload with the old/new credentials
            expected by the ``profile/change_password`` endpoint.
    """
    try:
        response = self.generic_request(
            method='POST',
            params=json.dumps(data),
            url=self.api_base_url + 'profile/change_password')
    except Exception as e:
        self.log.error('update_password Exception : {0}'.format(e))
        return json.dumps('Update password : Bad request')
    else:
        return response
def send_chat_message(self, data):
    """Send a chat message through the remote API.

    Args:
        data: JSON-serializable payload describing the message.
    """
    try:
        response = self.generic_request(method="POST",
                                        params=json.dumps(data),
                                        url=self.api_base_url + 'chat/send')
    except Exception as e:
        self.log.error('send_chat_message Exception : {0}'.format(e))
        return json.dumps('Send chat message : Bad request')
    else:
        return response
def delete_chat_message(self, msg_id):
    """Delete the chat message identified by *msg_id*.

    Args:
        msg_id (str): identifier appended to the ``chat/`` endpoint.
    """
    try:
        response = self.generic_request(method="DELETE",
                                        url=self.api_base_url + 'chat/' + msg_id)
    except Exception as e:
        self.log.error('delete_chat_message Exception : {0}'.format(e))
        return json.dumps('Delete chat message : Bad request')
    else:
        return response
def get_chat_conversation(self, data):
    """Retrieve a chat conversation selected by *data*.

    Args:
        data: JSON-serializable payload identifying the conversation.
    """
    try:
        response = self.generic_request(method='POST',
                                        params=json.dumps(data),
                                        url=self.api_base_url + 'chat')
    except Exception as e:
        self.log.error('get_chat_conversation Exception : {0}'.format(e))
        return json.dumps('Get chat conversation : Bad request')
    else:
        return response
def get_file_messages(self, data):
    """List file messages matching the filter in *data*.

    Args:
        data: JSON-serializable payload with the query parameters.
    """
    try:
        response = self.generic_request(method='POST',
                                        params=json.dumps(data),
                                        url=self.api_base_url + 'file_message')
    except Exception as e:
        self.log.error('get_file_messages Exception : {0}'.format(e))
        return json.dumps('Get file messages : Bad request')
    else:
        return response
def send_file_message(self, data):
    """Send a file message through the remote API.

    Args:
        data: JSON-serializable payload describing the file message.
    """
    try:
        response = self.generic_request(
            method="POST",
            params=json.dumps(data),
            url=self.api_base_url + 'file_message/send')
    except Exception as e:
        self.log.error('send_file_message Exception : {0}'.format(e))
        return json.dumps('Send file message : Bad request')
    else:
        return response
def delete_file_message(self, msg_id):
    """Delete the file message identified by *msg_id*.

    Args:
        msg_id (str): identifier appended to the ``file_message/`` endpoint.

    Returns:
        The raw response on success, or a JSON-encoded error string.
    """
    try:
        p = self.generic_request(
            url=self.api_base_url + 'file_message/' + msg_id,
            method='DELETE')
    except Exception as e:
        self.log.error('delete_file_message Exception : {0}'.format(e))
        # Fix: the error string previously read 'Set file message as read :
        # Bad request', which belongs to a different operation. Every sibling
        # wrapper returns '<Action> : Bad request' matching its own action.
        return json.dumps('Delete file message : Bad request')
    return p
def get_contacts(self):
    """Fetch the current user's contact list from the remote API."""
    try:
        response = self.generic_request(method='GET',
                                        url=self.api_base_url + 'contacts')
    except Exception as e:
        self.log.error('get_contacts Exception : {0}'.format(e))
        return json.dumps(
            'Get contacts list : Bad request : {0}'.format(e))
    else:
        return response
def find_user(self, query):
    """Search for a user on the remote API.

    Args:
        query (str): appended verbatim to the ``contacts/find_user``
            endpoint — NOTE(review): callers appear responsible for any
            leading '/' or '?' separator; confirm against call sites.
    """
    try:
        response = self.generic_request(
            method='GET',
            url=self.api_base_url + 'contacts/find_user' + query)
    except Exception as e:
        self.log.error('find_user Exception : {0}'.format(e))
        return json.dumps('Find user : Bad request')
    else:
        return response
def add_contact(self, data):
    """Send a contact (friend) request.

    Args:
        data: JSON-serializable payload identifying the target user.
    """
    try:
        response = self.generic_request(method='POST',
                                        params=json.dumps(data),
                                        url=self.api_base_url + 'contacts/add')
    except Exception as e:
        self.log.error('add_contact Exception : {0}'.format(e))
        return json.dumps('Add contact : Bad request')
    else:
        return response
def remove_contact(self, data):
    """Remove a contact from the current user's list.

    Args:
        data: JSON-serializable payload identifying the contact.
    """
    try:
        response = self.generic_request(method='POST',
                                        params=json.dumps(data),
                                        url=self.api_base_url + 'contacts/remove')
    except Exception as e:
        self.log.error('remove_contact Exception : {0}'.format(e))
        return json.dumps('Remove contact : Bad request')
    else:
        return response
def accept_contact_request(self, matr_id):
    """Accept a pending contact request.

    Args:
        matr_id (str): identifier appended to the ``contacts/accept/`` endpoint.
    """
    try:
        response = self.generic_request(
            method='GET',
            url=self.api_base_url + 'contacts/accept/' + matr_id)
    except Exception as e:
        self.log.error('accept_contact_request Exception : {0}'.format(e))
        return json.dumps('Accept contact request : Bad request')
    else:
        return response
def reject_contact_request(self, matr_id):
    """Reject a pending contact request.

    Args:
        matr_id (str): identifier appended to the ``contacts/reject/`` endpoint.
    """
    try:
        response = self.generic_request(
            method='GET',
            url=self.api_base_url + 'contacts/reject/' + matr_id)
    except Exception as e:
        self.log.error('reject_contact_request Exception : {0}'.format(e))
        return json.dumps('Reject contact request : Bad request')
    else:
        return response
def get_contact_profile(self, matr_id):
    """Fetch the profile of a contact.

    Args:
        matr_id (str): identifier appended to the ``contacts/`` endpoint.
    """
    try:
        response = self.generic_request(
            method='GET', url=self.api_base_url + 'contacts/' + matr_id)
    except Exception as e:
        self.log.error('get_contact_profile Exception : {0}'.format(e))
        return json.dumps('Get contact profile : Bad request')
    else:
        return response
| true | true |
f724cfc965d385156ea1686e76199775451ff589 | 4,083 | py | Python | test/functional/feature_includeconf.py | minblock/arcticoin | fc0ee011cc8a27cc22dd9841d563b37a8fa12255 | [
"MIT"
] | null | null | null | test/functional/feature_includeconf.py | minblock/arcticoin | fc0ee011cc8a27cc22dd9841d563b37a8fa12255 | [
"MIT"
] | null | null | null | test/functional/feature_includeconf.py | minblock/arcticoin | fc0ee011cc8a27cc22dd9841d563b37a8fa12255 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
    """Functional test for the -includeconf config-file directive.

    Exercises load order, the command-line prohibition, the no-recursion
    rule, missing-file errors, and multiple includeconf entries.
    """

    def set_test_params(self):
        # Reuse the cached chain; a single node is enough for config tests.
        self.setup_clean_chain = False
        self.num_nodes = 1

    def setup_chain(self):
        super().setup_chain()
        # Create additional config files
        # - tmpdir/node0/relative.conf
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative\n")
        # - tmpdir/node0/relative2.conf
        with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative2\n")
        # Appending (mode "a") preserves the framework-generated base config.
        with open(os.path.join(self.options.tmpdir, "node0", "arcticoin.conf"), "a", encoding='utf8') as f:
            f.write("uacomment=main\nincludeconf=relative.conf\n")

    def run_test(self):
        self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.log.info("-includeconf cannot be used as command-line arg")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
        self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        # relative2 must NOT be applied: includes inside included files are ignored.
        assert subversion.endswith("main; relative)/")
        self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
        self.log.info("-includeconf cannot contain invalid arg")
        # Commented out as long as we ignore invalid arguments in configuration files
        #with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
        #    f.write("foo=bar\n")
        #self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Invalid configuration value foo")
        self.log.info("-includeconf cannot be invalid path")
        os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
        self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Failed to include configuration file relative.conf")
        self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            # Restore initial file contents
            f.write("uacomment=relative\n")
        with open(os.path.join(self.options.tmpdir, "node0", "arcticoin.conf"), "a", encoding='utf8') as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative; relative2)/")
# Standard functional-test entry point.
if __name__ == '__main__':
    IncludeConfTest().main()
| 49.192771 | 224 | 0.694832 |
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
    """Functional test for the -includeconf config-file directive.

    Covers: config-file loading order, rejection on the command line,
    no recursive includes, missing include file, and multiple includes.
    """

    def set_test_params(self):
        # One node over the cached chain is sufficient for config checks.
        self.setup_clean_chain = False
        self.num_nodes = 1

    def setup_chain(self):
        super().setup_chain()
        # Write the two include files and append the include directive to the
        # framework-generated base config (mode "a" keeps its contents).
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative\n")
        with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative2\n")
        with open(os.path.join(self.options.tmpdir, "node0", "arcticoin.conf"), "a", encoding='utf8') as f:
            f.write("uacomment=main\nincludeconf=relative.conf\n")

    def run_test(self):
        self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.log.info("-includeconf cannot be used as command-line arg")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
        self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        # relative2 must NOT take effect: nested includes are ignored.
        assert subversion.endswith("main; relative)/")
        self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
        self.log.info("-includeconf cannot contain invalid arg")
        self.log.info("-includeconf cannot be invalid path")
        os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
        self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Failed to include configuration file relative.conf")
        self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            # Restore the include file overwritten above.
            f.write("uacomment=relative\n")
        with open(os.path.join(self.options.tmpdir, "node0", "arcticoin.conf"), "a", encoding='utf8') as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative; relative2)/")
# Standard functional-test entry point.
if __name__ == '__main__':
    IncludeConfTest().main()
| true | true |
f724cfe100b6d8d018d01d3fec03c2b0c5e1f781 | 6,287 | py | Python | DuelingDQN/agent.py | emarche/Value-based-DeepRL | 8b6458d4b82f293b401fc9e9c81cc482e0948830 | [
"MIT"
] | 1 | 2021-06-21T06:25:43.000Z | 2021-06-21T06:25:43.000Z | DuelingDQN/agent.py | emarche/Value-based-DeepRL | 8b6458d4b82f293b401fc9e9c81cc482e0948830 | [
"MIT"
] | null | null | null | DuelingDQN/agent.py | emarche/Value-based-DeepRL | 8b6458d4b82f293b401fc9e9c81cc482e0948830 | [
"MIT"
] | null | null | null | """DuelingDQN agent script
This manages the training phase of the off-policy DuelingDQN.
"""
import random
from collections import deque
import yaml
import numpy as np
# Read the experiment seed from the YAML config and seed every RNG source
# *before* TensorFlow is imported, so the whole run is reproducible.
with open('config.yml', 'r') as ymlfile:
    cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
    seed = cfg['setup']['seed']
    ymlfile.close()  # redundant: the with-statement already closes the file
random.seed(seed)
np.random.seed(seed)
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
tf.random.set_seed(seed)
from utils.deepnetwork import DeepNetwork
from utils.memorybuffer import Buffer
class DuelingDQN:
    """
    Class for the DuelingDQN agent
    """

    def __init__(self, env, params):
        """Initialize the agent, its network, optimizer and buffer

        Args:
            env (gym): gym environment
            params (dict): agent parameters (e.g., dnn structure)

        Returns:
            None
        """
        self.env = env
        self.model = DeepNetwork.build(env, params['dnn'])
        self.model_opt = Adam()
        self.buffer = Buffer(params['buffer']['size'])

    def get_action(self, state, eps):
        """Get the action to perform (epsilon-greedy)

        Args:
            state (list): agent current state
            eps (float): random action probability

        Returns:
            action (int): index of the action to perform
        """
        if np.random.uniform() <= eps:
            return np.random.randint(0, self.env.action_space.n)
        q_values = self.model(np.array([state])).numpy()[0]
        return np.argmax(q_values)

    def update(self, gamma, batch_size):
        """Prepare the samples to update the network

        Args:
            gamma (float): discount factor
            batch_size (int): requested batch size (capped by buffer size)

        Returns:
            None
        """
        batch_size = min(self.buffer.size, batch_size)
        states, actions, rewards, obs_states, dones = self.buffer.sample(batch_size)
        # The updates require shape (n° samples, len(metric))
        rewards = rewards.reshape(-1, 1)
        dones = dones.reshape(-1, 1)
        self.fit(gamma, states, actions, rewards, obs_states, dones)

    def fit(self, gamma, states, actions, rewards, obs_states, dones):
        """Minimize the mse of the temporal difference error between Q(s,a|θ)
        and the target y = r + γ max_a' Q(s', a'|θ). Vanilla-DQN style: the
        same network estimates its values and its targets. The dueling
        architecture (inside DeepNetwork) splits V(s) and A(s, a) streams.

        Bug fix: the target y was previously computed inside the
        GradientTape, so gradients flowed through Q(s', a') as well. The DQN
        update treats the target as a constant w.r.t. θ, so it is now
        computed outside the tape (equivalent to tf.stop_gradient).

        Args:
            gamma (float): discount factor
            states (list): batch states for the update
            actions (list): batch actions for the update
            rewards (list): batch rewards for the update
            obs_states (list): batch next-states for the update
            dones (list): batch continuation flags, stored as (1 - done)

        Returns:
            None
        """
        # Compute the target y = r + γ max_a' Q(s', a'|θ) outside the tape.
        obs_qvalues = self.model(obs_states)
        obs_action = tf.math.argmax(obs_qvalues, axis=-1).numpy()
        idxs = np.array([[int(i), int(action)] for i, action in enumerate(obs_action)])
        max_obs_qvalues = tf.expand_dims(tf.gather_nd(obs_qvalues, idxs), axis=-1)
        # dones holds (1 - done): terminal transitions zero the bootstrap term.
        y = rewards + gamma * max_obs_qvalues * dones

        with tf.GradientTape() as tape:
            # Compute values Q(s,a|θ) for the actions actually taken
            qvalues = self.model(states)
            idxs = np.array([[int(i), int(action)] for i, action in enumerate(actions)])
            qvalues = tf.expand_dims(tf.gather_nd(qvalues, idxs), axis=-1)
            # Compute the loss as mse of Q(s, a) - y
            td_errors = tf.math.subtract(qvalues, y)
            td_errors = 0.5 * tf.math.square(td_errors)
            loss = tf.math.reduce_mean(td_errors)
        # Compute the model gradient and update the network
        grad = tape.gradient(loss, self.model.trainable_variables)
        self.model_opt.apply_gradients(zip(grad, self.model.trainable_variables))

    def train(self, tracker, n_episodes, verbose, params, hyperp):
        """Main loop for the agent's training phase

        Args:
            tracker (object): used to store and save the training stats
            n_episodes (int): n° of episodes to perform
            verbose (int): how frequent we save the training stats
            params (dict): agent parameters (e.g., gamma, eps bounds)
            hyperp (dict): algorithmic specific values (e.g., eps decay)

        Returns:
            None
        """
        mean_reward = deque(maxlen=100)
        eps, eps_min = params['eps'], params['eps_min']
        eps_decay = hyperp['eps_d']
        for e in range(n_episodes):
            ep_reward = 0
            state = self.env.reset()
            while True:
                action = self.get_action(state, eps)
                obs_state, obs_reward, done, _ = self.env.step(action)
                # Store the continuation flag as (1 - done); see fit().
                self.buffer.store(state,
                                  action,
                                  obs_reward,
                                  obs_state,
                                  1 - int(done)
                                  )
                ep_reward += obs_reward
                state = obs_state
                if e > params['update_start']:
                    self.update(
                        params['gamma'],
                        params['buffer']['batch']
                    )
                if done: break
            eps = max(eps_min, eps * eps_decay)
            mean_reward.append(ep_reward)
            tracker.update([e, ep_reward])
            if e % verbose == 0: tracker.save_metrics()
            print(f'Ep: {e}, Ep_Rew: {ep_reward}, Mean_Rew: {np.mean(mean_reward)}')
| 33.801075 | 436 | 0.576905 |
import random
from collections import deque
import yaml
import numpy as np
# Seed every RNG source from the YAML config before importing TensorFlow,
# so the whole training run is reproducible.
with open('config.yml', 'r') as ymlfile:
    cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
    seed = cfg['setup']['seed']
    ymlfile.close()  # redundant: the with-statement already closes the file
random.seed(seed)
np.random.seed(seed)
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
tf.random.set_seed(seed)
from utils.deepnetwork import DeepNetwork
from utils.memorybuffer import Buffer
class DuelingDQN:
    """Off-policy Dueling DQN agent: network, optimizer and replay buffer."""

    def __init__(self, env, params):
        """Build the network from params['dnn'] and size the replay buffer."""
        self.env = env
        self.model = DeepNetwork.build(env, params['dnn'])
        self.model_opt = Adam()
        self.buffer = Buffer(params['buffer']['size'])

    def get_action(self, state, eps):
        """Epsilon-greedy action: random with probability eps, else argmax Q."""
        if np.random.uniform() <= eps:
            return np.random.randint(0, self.env.action_space.n)
        q_values = self.model(np.array([state])).numpy()[0]
        return np.argmax(q_values)

    def update(self, gamma, batch_size):
        """Sample a batch (capped by buffer size) and fit the network."""
        batch_size = min(self.buffer.size, batch_size)
        states, actions, rewards, obs_states, dones = self.buffer.sample(batch_size)
        # Reshape to column vectors so broadcasting in fit() lines up.
        rewards = rewards.reshape(-1, 1)
        dones = dones.reshape(-1, 1)
        self.fit(gamma, states, actions, rewards, obs_states, dones)

    def fit(self, gamma, states, actions, rewards, obs_states, dones):
        """One gradient step on the TD error between Q(s,a) and
        y = r + gamma * max_a' Q(s',a') * dones (dones stored as 1 - done).

        NOTE(review): the target y is computed inside the GradientTape, so
        gradients also flow through Q(s', a'); standard DQN treats the
        target as a constant (stop_gradient) — confirm whether intended.
        """
        with tf.GradientTape() as tape:
            obs_qvalues = self.model(obs_states)
            obs_action = tf.math.argmax(obs_qvalues, axis=-1).numpy()
            idxs = np.array([[int(i), int(action)] for i, action in enumerate(obs_action)])
            max_obs_qvalues = tf.expand_dims(tf.gather_nd(obs_qvalues, idxs), axis=-1)
            y = rewards + gamma * max_obs_qvalues * dones
            # Compute values Q(s,a|θ)
            qvalues = self.model(states)
            idxs = np.array([[int(i), int(action)] for i, action in enumerate(actions)])
            qvalues = tf.expand_dims(tf.gather_nd(qvalues, idxs), axis=-1)
            # Compute the loss as mse of Q(s, a) - y
            td_errors = tf.math.subtract(qvalues, y)
            td_errors = 0.5 * tf.math.square(td_errors)
            loss = tf.math.reduce_mean(td_errors)
        # Compute the model gradient and update the network
        grad = tape.gradient(loss, self.model.trainable_variables)
        self.model_opt.apply_gradients(zip(grad, self.model.trainable_variables))

    def train(self, tracker, n_episodes, verbose, params, hyperp):
        """Main training loop: act, store transitions, periodically update,
        decay epsilon per episode, and log stats through the tracker."""
        mean_reward = deque(maxlen=100)
        eps, eps_min = params['eps'], params['eps_min']
        eps_decay = hyperp['eps_d']
        for e in range(n_episodes):
            ep_reward, steps = 0, 0
            state = self.env.reset()
            while True:
                action = self.get_action(state, eps)
                obs_state, obs_reward, done, _ = self.env.step(action)
                # Continuation flag stored as (1 - done); see fit().
                self.buffer.store(state,
                                  action,
                                  obs_reward,
                                  obs_state,
                                  1 - int(done)
                                  )
                ep_reward += obs_reward
                steps += 1
                state = obs_state
                if e > params['update_start']:
                    self.update(
                        params['gamma'],
                        params['buffer']['batch']
                    )
                if done: break
            eps = max(eps_min, eps * eps_decay)
            mean_reward.append(ep_reward)
            tracker.update([e, ep_reward])
            if e % verbose == 0: tracker.save_metrics()
            print(f'Ep: {e}, Ep_Rew: {ep_reward}, Mean_Rew: {np.mean(mean_reward)}')
| true | true |
f724d00e6329a8b543e74c51d75c61d1be59e773 | 165 | py | Python | Oops/Python.py | SharkDeveloper/Client-Server-app | 071092f7f0a5ecc0c7e05eb8a5abeda759216709 | [
"Apache-2.0"
] | null | null | null | Oops/Python.py | SharkDeveloper/Client-Server-app | 071092f7f0a5ecc0c7e05eb8a5abeda759216709 | [
"Apache-2.0"
] | null | null | null | Oops/Python.py | SharkDeveloper/Client-Server-app | 071092f7f0a5ecc0c7e05eb8a5abeda759216709 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Prefer PySide; fall back to PyQt5 when it is not installed.
# NOTE(review): PySide (Qt4) ships QtGui, not QtWidgets, so the first import
# likely always fails — confirm whether PySide2/PySide6 was intended.
try:
    from PySide import QtWidgets
except ImportError:
    # Fix: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt and unrelated errors raised at import time.
    from PyQt5 import QtWidgets
class Python:
    """Minimal demo class; constructing an instance just prints a greeting."""

    def __init__(self):
        # Side effect only: announce construction on stdout.
        print("Hi")
| 11.785714 | 32 | 0.612121 |
# Prefer PySide; fall back to PyQt5 when it is not installed.
# NOTE(review): PySide (Qt4) ships QtGui, not QtWidgets, so the first import
# likely always fails — confirm whether PySide2/PySide6 was intended.
try:
    from PySide import QtWidgets
except ImportError:
    # Fix: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt and unrelated errors raised at import time.
    from PyQt5 import QtWidgets
class Python:
    """Minimal demo class; constructing an instance just prints a greeting."""

    def __init__(self):
        # Side effect only: announce construction on stdout.
        print("Hi")
| true | true |
f724d0189448a885ec38db8eea6a6121e6ff2796 | 11,397 | py | Python | django/db/backends/base/features.py | thomazzo/django | b0d716cbffdd66dd9108895d0524bef2530fc732 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/backends/base/features.py | thomazzo/django | b0d716cbffdd66dd9108895d0524bef2530fc732 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/backends/base/features.py | thomazzo/django | b0d716cbffdd66dd9108895d0524bef2530fc732 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | from django.db.models.aggregates import StdDev
from django.db.utils import NotSupportedError, ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures:
    """Registry of capability flags describing what a database backend
    supports. Concrete backends override individual attributes; query
    compilation and the test suite branch on these values."""

    # GIS (geographic) backend support.
    gis_enabled = False
    # Can GROUP BY be collapsed to the primary key / selected primary keys?
    allows_group_by_pk = False
    allows_group_by_selected_pks = False
    # Value cursor.fetchmany() returns when there are no more rows.
    empty_fetchmany_value = []
    # Can an UPDATE reference the table it is updating in a subquery?
    update_can_self_select = True
    # Does the backend distinguish between '' and None?
    interprets_empty_strings_as_nulls = False
    # Does the backend allow inserting duplicate NULL rows in a nullable
    # unique field? All core backends implement this correctly, but other
    # databases such as SQL Server do not.
    supports_nullable_unique_constraints = True
    # Does the backend allow inserting duplicate rows when a unique_together
    # constraint exists and some fields are nullable but not all of them?
    supports_partially_nullable_unique_constraints = True
    # Can results be streamed in chunks rather than fetched all at once?
    can_use_chunked_reads = True
    # Can INSERT ... RETURNING (or equivalent) hand back generated ids?
    can_return_id_from_insert = False
    can_return_ids_from_bulk_insert = False
    has_bulk_insert = True
    uses_savepoints = False
    can_release_savepoints = False
    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
    related_fields_match_type = False
    allow_sliced_subqueries_with_in = True
    has_select_for_update = False
    has_select_for_update_nowait = False
    has_select_for_update_skip_locked = False
    has_select_for_update_of = False
    # Does the database's SELECT FOR UPDATE OF syntax require a column rather
    # than a table?
    select_for_update_of_column = False
    # Does the default test database allow multiple connections?
    # Usually an indication that the test database is in-memory
    test_db_allows_multiple_connections = True
    # Can an object be saved without an explicit primary key?
    supports_unspecified_pk = False
    # Can a fixture contain forward references? i.e., are
    # FK constraints checked at the end of transaction, or
    # at the end of each save operation?
    supports_forward_references = True
    # Does the backend truncate names properly when they are too long?
    truncates_names = False
    # Is there a REAL datatype in addition to floats/doubles?
    has_real_datatype = False
    supports_subqueries_in_group_by = True
    # Is there a true datatype for uuid?
    has_native_uuid_field = False
    # Is there a true datatype for timedeltas?
    has_native_duration_field = False
    # Does the database driver supports same type temporal data subtraction
    # by returning the type used to store duration field?
    supports_temporal_subtraction = False
    # Does the __regex lookup support backreferencing and grouping?
    supports_regex_backreferencing = True
    # Can date/datetime lookups be performed using a string?
    supports_date_lookup_using_string = True
    # Can datetimes with timezones be used?
    supports_timezones = True
    # Does the database have a copy of the zoneinfo database?
    has_zoneinfo_database = True
    # When performing a GROUP BY, is an ORDER BY NULL required
    # to remove any ordering?
    requires_explicit_null_ordering_when_grouping = False
    # Does the backend order NULL values as largest or smallest?
    nulls_order_largest = False
    # The database's limit on the number of query parameters.
    max_query_params = None
    # Can an object have an autoincrement primary key of 0? MySQL says No.
    allows_auto_pk_0 = True
    # Do we need to NULL a ForeignKey out, or can the constraint check be
    # deferred
    can_defer_constraint_checks = False
    # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
    supports_mixed_date_datetime_comparisons = True
    # Does the backend support tablespaces? Default to False because it isn't
    # in the SQL standard.
    supports_tablespaces = False
    # Does the backend reset sequences between tests?
    supports_sequence_reset = True
    # Can the backend introspect the default value of a column?
    can_introspect_default = True
    # Confirm support for introspected foreign keys
    # Every database can do this reliably, except MySQL,
    # which can't do it for MyISAM tables
    can_introspect_foreign_keys = True
    # Can the backend introspect an AutoField, instead of an IntegerField?
    can_introspect_autofield = False
    # Can the backend introspect a BigIntegerField, instead of an IntegerField?
    can_introspect_big_integer_field = True
    # Can the backend introspect an BinaryField, instead of an TextField?
    can_introspect_binary_field = True
    # Can the backend introspect an DecimalField, instead of an FloatField?
    can_introspect_decimal_field = True
    # Can the backend introspect a DurationField, instead of a BigIntegerField?
    can_introspect_duration_field = True
    # Can the backend introspect an IPAddressField, instead of an CharField?
    can_introspect_ip_address_field = False
    # Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
    can_introspect_positive_integer_field = False
    # Can the backend introspect a SmallIntegerField, instead of an IntegerField?
    can_introspect_small_integer_field = False
    # Can the backend introspect a TimeField, instead of a DateTimeField?
    can_introspect_time_field = True
    # Some backends may not be able to differentiate BooleanField from other
    # fields such as IntegerField.
    introspected_boolean_field_type = 'BooleanField'
    # Can the backend introspect the column order (ASC/DESC) for indexes?
    supports_index_column_ordering = True
    # Support for the DISTINCT ON clause
    can_distinct_on_fields = False
    # Does the backend decide to commit before SAVEPOINT statements
    # when autocommit is disabled? https://bugs.python.org/issue8145#msg109965
    autocommits_when_autocommit_is_off = False
    # Does the backend prevent running SQL queries in broken transactions?
    atomic_transactions = True
    # Can we roll back DDL in a transaction?
    can_rollback_ddl = False
    # Does it support operations requiring references rename in a transaction?
    supports_atomic_references_rename = True
    # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
    supports_combined_alters = False
    # Does it support foreign keys?
    supports_foreign_keys = True
    # Does it support CHECK constraints?
    supports_column_check_constraints = True
    supports_table_check_constraints = True
    # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
    # parameter passing? Note this can be provided by the backend even if not
    # supported by the Python driver
    supports_paramstyle_pyformat = True
    # Does the backend require literal defaults, rather than parameterized ones?
    requires_literal_defaults = False
    # Does the backend require a connection reset after each material schema change?
    connection_persists_old_columns = False
    # What kind of error does the backend throw when accessing closed cursor?
    closed_cursor_error_class = ProgrammingError
    # Does 'a' LIKE 'A' match?
    has_case_insensitive_like = True
    # Does the backend require the sqlparse library for splitting multi-line
    # statements before executing them?
    requires_sqlparse_for_splitting = True
    # Suffix for backends that don't support "SELECT xxx;" queries.
    bare_select_suffix = ''
    # If NULL is implied on columns without needing to be explicitly specified
    implied_column_null = False
    uppercases_column_names = False
    # Does the backend support "select for update" queries with limit (and offset)?
    supports_select_for_update_with_limit = True
    # Does the backend ignore null expressions in GREATEST and LEAST queries unless
    # every expression is null?
    greatest_least_ignores_nulls = False
    # Can the backend clone databases for parallel test execution?
    # Defaults to False to allow third-party backends to opt-in.
    can_clone_databases = False
    # Does the backend consider table names with different casing to
    # be equal?
    ignores_table_name_case = False
    # Place FOR UPDATE right after FROM clause. Used on MSSQL.
    for_update_after_from = False
    # Combinatorial flags
    supports_select_union = True
    supports_select_intersection = True
    supports_select_difference = True
    supports_slicing_ordering_in_compound = False
    # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
    # expressions?
    supports_aggregate_filter_clause = False
    # Does the backend support indexing a TextField?
    supports_index_on_text_field = True
    # Does the backed support window expressions (expression OVER (...))?
    supports_over_clause = False
    # Does the backend support CAST with precision?
    supports_cast_with_precision = True
    # How many second decimals does the database return when casting a value to
    # a type with time?
    time_cast_precision = 6
    # SQL to create a procedure for use by the Django test suite. The
    # functionality of the procedure isn't important.
    create_test_procedure_without_params_sql = None
    create_test_procedure_with_int_param_sql = None
    # Does the backend support keyword parameters for cursor.callproc()?
    supports_callproc_kwargs = False
    # Convert CharField results from bytes to str in database functions.
    db_functions_convert_bytes_to_str = False
    # What formats does the backend EXPLAIN syntax support?
    supported_explain_formats = set()
    # Does DatabaseOperations.explain_query_prefix() raise ValueError if
    # unknown kwargs are passed to QuerySet.explain()?
    validates_explain_options = True
    # Does the backend support the default parameter in lead() and lag()?
    supports_default_in_lead_lag = True
    # Does the backend support ignoring constraint or uniqueness errors during
    # INSERT?
    supports_ignore_conflicts = True
    # Does this backend require casting the results of CASE expressions used
    # in UPDATE statements to ensure the expression has the correct type?
    requires_casted_case_in_updates = False

    def __init__(self, connection):
        # The wrapped database connection whose capabilities are described.
        self.connection = connection

    @cached_property
    def supports_explaining_query_execution(self):
        """Does this backend support explaining query execution?"""
        return self.connection.ops.explain_prefix is not None

    @cached_property
    def supports_transactions(self):
        """Confirm support for transactions."""
        # Probe by inserting a row and rolling back: if the table is empty
        # afterwards, the rollback (and thus transactions) worked.
        with self.connection.cursor() as cursor:
            cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
            self.connection.set_autocommit(False)
            cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
            self.connection.rollback()
            self.connection.set_autocommit(True)
            cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
            count, = cursor.fetchone()
            cursor.execute('DROP TABLE ROLLBACK_TEST')
        return count == 0

    @cached_property
    def supports_stddev(self):
        """Confirm support for STDDEV and related stats functions."""
        try:
            self.connection.ops.check_expression_support(StdDev(1))
        except NotSupportedError:
            return False
        return True
| 36.883495 | 85 | 0.742476 | from django.db.models.aggregates import StdDev
from django.db.utils import NotSupportedError, ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures:
gis_enabled = False
allows_group_by_pk = False
allows_group_by_selected_pks = False
empty_fetchmany_value = []
update_can_self_select = True
interprets_empty_strings_as_nulls = False
supports_nullable_unique_constraints = True
supports_partially_nullable_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
can_return_ids_from_bulk_insert = False
has_bulk_insert = True
uses_savepoints = False
can_release_savepoints = False
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries_with_in = True
has_select_for_update = False
has_select_for_update_nowait = False
has_select_for_update_skip_locked = False
has_select_for_update_of = False
# Does the database's SELECT FOR UPDATE OF syntax require a column rather
select_for_update_of_column = False
test_db_allows_multiple_connections = True
supports_unspecified_pk = False
supports_forward_references = True
truncates_names = False
has_real_datatype = False
supports_subqueries_in_group_by = True
has_native_uuid_field = False
has_native_duration_field = False
supports_temporal_subtraction = False
supports_regex_backreferencing = True
supports_date_lookup_using_string = True
supports_timezones = True
has_zoneinfo_database = True
requires_explicit_null_ordering_when_grouping = False
nulls_order_largest = False
max_query_params = None
# Can an object have an autoincrement primary key of 0? MySQL says No.
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
supports_tablespaces = False
supports_sequence_reset = True
can_introspect_default = True
can_introspect_foreign_keys = True
# Can the backend introspect an AutoField, instead of an IntegerField?
can_introspect_autofield = False
# Can the backend introspect a BigIntegerField, instead of an IntegerField?
can_introspect_big_integer_field = True
# Can the backend introspect an BinaryField, instead of an TextField?
can_introspect_binary_field = True
# Can the backend introspect an DecimalField, instead of an FloatField?
can_introspect_decimal_field = True
# Can the backend introspect a DurationField, instead of a BigIntegerField?
can_introspect_duration_field = True
# Can the backend introspect an IPAddressField, instead of an CharField?
can_introspect_ip_address_field = False
# Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
can_introspect_positive_integer_field = False
# Can the backend introspect a SmallIntegerField, instead of an IntegerField?
can_introspect_small_integer_field = False
# Can the backend introspect a TimeField, instead of a DateTimeField?
can_introspect_time_field = True
# Some backends may not be able to differentiate BooleanField from other
# fields such as IntegerField.
introspected_boolean_field_type = 'BooleanField'
# Can the backend introspect the column order (ASC/DESC) for indexes?
supports_index_column_ordering = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? https://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Does it support operations requiring references rename in a transaction?
supports_atomic_references_rename = True
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
supports_table_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = True
# Does the backend require the sqlparse library for splitting multi-line
# statements before executing them?
requires_sqlparse_for_splitting = True
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ''
implied_column_null = False
uppercases_column_names = False
supports_select_for_update_with_limit = True
greatest_least_ignores_nulls = False
can_clone_databases = False
ignores_table_name_case = False
for_update_after_from = False
supports_select_union = True
supports_select_intersection = True
supports_select_difference = True
supports_slicing_ordering_in_compound = False
supports_aggregate_filter_clause = False
supports_index_on_text_field = True
supports_over_clause = False
supports_cast_with_precision = True
time_cast_precision = 6
create_test_procedure_without_params_sql = None
create_test_procedure_with_int_param_sql = None
# Does the backend support keyword parameters for cursor.callproc()?
supports_callproc_kwargs = False
# Convert CharField results from bytes to str in database functions.
db_functions_convert_bytes_to_str = False
# What formats does the backend EXPLAIN syntax support?
supported_explain_formats = set()
# Does DatabaseOperations.explain_query_prefix() raise ValueError if
# unknown kwargs are passed to QuerySet.explain()?
validates_explain_options = True
# Does the backend support the default parameter in lead() and lag()?
supports_default_in_lead_lag = True
# Does the backend support ignoring constraint or uniqueness errors during
# INSERT?
supports_ignore_conflicts = True
# Does this backend require casting the results of CASE expressions used
# in UPDATE statements to ensure the expression has the correct type?
requires_casted_case_in_updates = False
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_explaining_query_execution(self):
return self.connection.ops.explain_prefix is not None
@cached_property
def supports_transactions(self):
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.set_autocommit(False)
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
return count == 0
@cached_property
def supports_stddev(self):
try:
self.connection.ops.check_expression_support(StdDev(1))
except NotSupportedError:
return False
return True
| true | true |
f724d089bc635ac3025f0392ab99da036fdef499 | 3,249 | py | Python | main.py | deeso/slow-hitter | 1fd3c7effaf532a828f30908715157b188ef5884 | [
"Apache-2.0"
] | null | null | null | main.py | deeso/slow-hitter | 1fd3c7effaf532a828f30908715157b188ef5884 | [
"Apache-2.0"
] | null | null | null | main.py | deeso/slow-hitter | 1fd3c7effaf532a828f30908715157b188ef5884 | [
"Apache-2.0"
] | null | null | null | import logging
import argparse
import sys
from slow.hitter import HitterService as Hitter
from slow.hitter import KnownHosts
from slow.etl import ETL, DEFAULT_NAMES, DEFAULT_PATTERNS, DEFAULT_CONFIG
from slow.mongo_backend import MongoConnection
# Command-line interface.  Defaults come from the library modules so running
# with no arguments targets the standard local setup.
parser = argparse.ArgumentParser(description='Start syslog-grok-mongo captures.')
parser.add_argument('-name', type=str, default=Hitter.NAME,
                    help='name of the service')
# Mongo configs
parser.add_argument('-muri', type=str, default='mongo://127.0.0.1:27017',
                    help='mongo uri')
parser.add_argument('-mdb', type=str, default=MongoConnection.DB_NAME,
                    help='mongo db name')
# ETL stuff
parser.add_argument('-cpdir', type=str, default=DEFAULT_PATTERNS,
                    help='directory containing custom grok patterns directory')
parser.add_argument('-names', type=str, default=DEFAULT_NAMES,
                    help='file containing all the names for rule patterns')
parser.add_argument('-gconfig', type=str, default=DEFAULT_CONFIG,
                    help='Grok frontend configuration for rule chains')
# Hitter stuff
parser.add_argument('-broker_uri', type=str, default=Hitter.BROKER_URI,
                    help='kombu queue address')
parser.add_argument('-broker_queue', type=str, default=Hitter.BROKER_QUEUE,
                    help='kombu queue name to publish to')
# NOTE(review): buffer_uri defaults to the broker URI; confirm the result
# buffer is intentionally hosted on the same broker.
parser.add_argument('-buffer_uri', type=str, default=Hitter.BROKER_URI,
                    help='buffer uri for results')
parser.add_argument('-buffer_queue', type=str, default=Hitter.LOGSTASH_QUEUE,
                    help='kombu queue for results')
parser.add_argument('-known_hosts', type=str, default=KnownHosts.HOST_FILE,
                    help='hosts file to load')
parser.add_argument('-msg_limit', type=int, default=100,
                    help='limit the number of messages')
# Fixed "WARRNING" -> "WARNING" typo in the user-visible help text.
V = 'log levels: INFO: %d, DEBUG: %d, WARNING: %d' % (logging.INFO,
                                                      logging.DEBUG,
                                                      logging.WARNING)
parser.add_argument('-log_level', type=int, default=logging.DEBUG,
                    help=V)
if __name__ == "__main__":
args = parser.parse_args()
logging.getLogger().setLevel(args.log_level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
mongo_backend = MongoConnection(uri=args.muri,
db_name=args.mdb)
ETL.setup_grokker(args)
etl_backend = ETL
service = Hitter(broker_uri=args.broker_uri,
broker_queue=args.broker_queue,
hosts_file=args.known_hosts,
mongo_backend=mongo_backend,
etl_backend=etl_backend,
store_uri=args.buffer_uri,
store_queue=args.buffer_queue,
msg_limit=args.msg_limit)
try:
logging.debug("Starting the syslog listener")
service.serve_forever(poll_interval=0.5)
except (IOError, SystemExit):
raise
except KeyboardInterrupt:
raise
| 39.621951 | 81 | 0.634349 | import logging
import argparse
import sys
from slow.hitter import HitterService as Hitter
from slow.hitter import KnownHosts
from slow.etl import ETL, DEFAULT_NAMES, DEFAULT_PATTERNS, DEFAULT_CONFIG
from slow.mongo_backend import MongoConnection
parser = argparse.ArgumentParser(description='Start syslog-grok-mongo captures.')
parser.add_argument('-name', type=str, default=Hitter.NAME,
help='name of the service')
parser.add_argument('-muri', type=str, default='mongo://127.0.0.1:27017',
help='mongo uri')
parser.add_argument('-mdb', type=str, default=MongoConnection.DB_NAME,
help='mongo db name')
parser.add_argument('-cpdir', type=str, default=DEFAULT_PATTERNS,
help='directory containing custom grok patterns directory')
parser.add_argument('-names', type=str, default=DEFAULT_NAMES,
help='file containing all the names for rule patterns')
parser.add_argument('-gconfig', type=str, default=DEFAULT_CONFIG,
help='Grok frontend configuration for rule chains')
parser.add_argument('-broker_uri', type=str, default=Hitter.BROKER_URI,
help='kombu queue address')
parser.add_argument('-broker_queue', type=str, default=Hitter.BROKER_QUEUE,
help='kombu queue name to publish to')
parser.add_argument('-buffer_uri', type=str, default=Hitter.BROKER_URI,
help='buffer uri for results')
parser.add_argument('-buffer_queue', type=str, default=Hitter.LOGSTASH_QUEUE,
help='kombu queue for results')
parser.add_argument('-known_hosts', type=str, default=KnownHosts.HOST_FILE,
help='hosts file to load')
parser.add_argument('-msg_limit', type=int, default=100,
help='limit the number of messages')
V = 'log levels: INFO: %d, DEBUG: %d, WARRNING: %d' % (logging.INFO,
logging.DEBUG,
logging.WARNING)
parser.add_argument('-log_level', type=int, default=logging.DEBUG,
help=V)
if __name__ == "__main__":
args = parser.parse_args()
logging.getLogger().setLevel(args.log_level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
mongo_backend = MongoConnection(uri=args.muri,
db_name=args.mdb)
ETL.setup_grokker(args)
etl_backend = ETL
service = Hitter(broker_uri=args.broker_uri,
broker_queue=args.broker_queue,
hosts_file=args.known_hosts,
mongo_backend=mongo_backend,
etl_backend=etl_backend,
store_uri=args.buffer_uri,
store_queue=args.buffer_queue,
msg_limit=args.msg_limit)
try:
logging.debug("Starting the syslog listener")
service.serve_forever(poll_interval=0.5)
except (IOError, SystemExit):
raise
except KeyboardInterrupt:
raise
| true | true |
f724d09925aef79360ace23f3ceeeecc66e5dc5d | 21,173 | py | Python | infra/libs/gerrit_api/test/gerrit_api_test.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | infra/libs/gerrit_api/test/gerrit_api_test.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | infra/libs/gerrit_api/test/gerrit_api_test.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for gerrit_api.py"""
import copy
import json
import mock
import requests
import tempfile
import time
import unittest
from infra.libs import gerrit_api
# Gerrit prefixes all JSON responses with this XSSI-protection header; the
# client is expected to strip it before parsing.
GERRIT_JSON_HEADER = ')]}\'\n'
# Headers the client is expected to send; the Authorization value is the
# base64 of MOCK_AUTH below.
HEADERS = {
    'Accept': 'application/json',
    'Accept-encoding': 'gzip',
    'Authorization': 'Basic Z2l0LWNvbW1pdC1ib3RAY2hyb21pdW0ub3JnOnNlY3JldA==',
}
# Variant expected on requests that carry a JSON body.
HEADERS_WITH_CONTENT_TYPE = HEADERS.copy()
HEADERS_WITH_CONTENT_TYPE['Content-Type'] = 'application/json;charset=UTF-8'
# Expected payload for set_review() with a message.
TEST_PAYLOAD = {
    'labels': {
        'Code-Review': 1,
    },
    'message': 'Test message.',
    'notify': 'NONE',
}
# Expected payload for set_review() with labels only.
TEST_PAYLOAD_LABELS_ONLY = {
    'labels': {
        'Code-Review': 1,
    },
    'notify': 'OWNER',
}
# Canned ChangeInfo structure mirroring Gerrit's change-detail response.
TEST_CHANGE_INFO = {
    'id': 'project~branch~12345~change',
    'change_id': 12345,
    'created': '2014-02-11 12:14:28.135200000',
    'updated': '2014-03-11 00:20:08.946000000',
    'current_revision': 'THIRD',
    'owner': {
        'name': 'Some Person',
    },
    'revisions': {
        'THIRD': {
            '_number': 3,
        },
        'SECOND': {
            '_number': 2,
        },
        'FIRST': {
            '_number': 1,
        },
    },
    'labels': {
        'Commit-Queue': {
            'recommended': { '_account_id': 1 }
        },
        'Test-Label': {
            'disliked': { '_account_id' : 42 }
        },
        'Code-Review': {
            'approved': { '_account_id': 2 }
        },
    },
    'messages': [
        {
            'id': 1,
            'author': 'test-user@test.org',
            'date': '2014-02-11 12:10:14.311200000',
            'message': 'MESSAGE1',
        },
        {
            'id': 2,
            'date': '2014-02-11 12:11:14.311200000',
            'message': 'MESSAGE2',
            '_revision_number': 2,
        },
    ],
}
# (username, password) pair used to build the clients under test.
MOCK_AUTH=('git-commit-bot@chromium.org', 'secret')
def _create_mock_return(content, code):
  """Build a canned requests.Response with the given body and status code."""
  response = requests.Response()
  response.status_code = code
  response._content = content
  return response
# TODO(akuegel): Add more test cases and remove the pragma no covers.
class GerritAgentTestCase(unittest.TestCase):
  def setUp(self):
    """Create one read-write and one read-only Gerrit client for the tests."""
    self.gerrit = gerrit_api.Gerrit('chromium-review.googlesource.com',
                                    gerrit_api.Credentials(auth=MOCK_AUTH))
    self.gerrit_read_only = gerrit_api.Gerrit(
        'chromium-review.googlesource.com',
        gerrit_api.Credentials(auth=MOCK_AUTH),
        read_only=True)
  @mock.patch.object(requests.Session, 'request')
  def test_request_no_leading_slash(self, mock_method):
    """A request_path without a leading slash still builds a correct URL."""
    mock_method.return_value = _create_mock_return(
        '%s[]' % GERRIT_JSON_HEADER, 200)
    result = self.gerrit._request(method='GET',
                                  request_path='changes/?q=query:no_results')
    mock_method.assert_called_once_with(
        data=None,
        method='GET',
        params=None,
        url=('https://chromium-review.googlesource.com/a/changes/'
             '?q=query:no_results'),
        headers=HEADERS,
        hooks=self.gerrit._instrumentation_hooks)
    # _request returns (status_code, parsed_json).
    self.assertEqual(result, (200, []))
  @mock.patch.object(gerrit_api.Gerrit, '_sleep')
  @mock.patch.object(time, 'time')
  @mock.patch.object(requests.Session, 'request')
  def test_request_throttled(self, mock_method, time_mock_method, sleep_mock):
    """A client built with a throttle delay sleeps between rapid requests."""
    # Third positional argument (0.1) is the throttle delay in seconds.
    gerrit_throttled = gerrit_api.Gerrit('chromium-review.googlesource.com',
                                         gerrit_api.Credentials(auth=MOCK_AUTH),
                                         0.1)
    mock_method.return_value = _create_mock_return(None, 404)
    time_mock_method.return_value = 100
    gerrit_throttled._request(method='GET', request_path='/accounts/self')
    # Call it twice to test the throttling.
    gerrit_throttled._request(method='GET', request_path='/accounts/self')
    sleep_mock.assert_called_once_with(0)
    time_mock_method.return_value = 101
    # Call it again after exceeding the throttle to cover the other branch.
    gerrit_throttled._request(method='GET', request_path='/accounts/self')
  @mock.patch.object(requests.Session, 'request')
  def test_get_account(self, mock_method):
    """get_account parses the XSSI-prefixed JSON body into a dict."""
    mock_method.return_value = _create_mock_return(
        ('%s{"_account_id":1000096,"name":"John Doe","email":'
         '"john.doe@test.com","username":"john"}') % GERRIT_JSON_HEADER,
        200)
    result = self.gerrit.get_account('self')
    mock_method.assert_called_once_with(
        data=None,
        method='GET',
        params=None,
        url='https://chromium-review.googlesource.com/a/accounts/self',
        headers=HEADERS,
        hooks=self.gerrit._instrumentation_hooks)
    expected_result = {
        '_account_id': 1000096,
        'name': 'John Doe',
        'email': 'john.doe@test.com',
        'username': 'john'
    }
    self.assertEqual(result, expected_result)
  @mock.patch.object(requests.Session, 'request')
  def test_get_account_404(self, mock_method):
    """get_account returns None (not an exception) for an unknown account."""
    mock_method.return_value = _create_mock_return(None, 404)
    result = self.gerrit.get_account('does.not@exist.com')
    mock_method.assert_called_once_with(
        data=None,
        method='GET',
        params=None,
        url=('https://chromium-review.googlesource.com'
             '/a/accounts/does.not@exist.com'),
        headers=HEADERS,
        hooks=self.gerrit._instrumentation_hooks)
    self.assertEqual(result, None)
  @mock.patch.object(requests.Session, 'request')
  def test_get_account_unexpected_response(self, mock_method):
    """Any status other than 200/404 raises UnexpectedResponseException."""
    mock_method.return_value = _create_mock_return(None, 201)
    self.assertRaises(gerrit_api.UnexpectedResponseException,
                      self.gerrit.get_account, 'self')
@mock.patch.object(requests.Session, 'request')
def test_list_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.list_group_members('test-group')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_list_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.list_group_members, 'test-group')
def test_list_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.list_group_members, 'a/b/c')
@mock.patch.object(requests.Session, 'request')
def test_add_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
members = ['jane.roe@example.com']
payload = { 'members': members }
result = self.gerrit.add_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.add'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_add_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.add_group_members, 'test-group', ['a@b.com'])
def test_add_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.add_group_members, 'a/b/c', [])
def test_add_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.add_group_members,
'test-group', ['a@b.com'])
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
204)
members = ['jane.roe@example.com']
payload = { 'members': members }
result = self.gerrit.delete_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.delete'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(
gerrit_api.UnexpectedResponseException,
self.gerrit.delete_group_members, 'test-group', ['a@b.com'])
def test_delete_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.delete_group_members, 'a/b/c', [])
def test_delete_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.delete_group_members,
'test-group', ['a@b.com'])
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s"parent"' % GERRIT_JSON_HEADER, 200)
result = self.gerrit.set_project_parent('project', 'parent')
payload = {
'parent': 'parent',
'commit_message': 'Changing parent project to parent'
}
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='PUT',
params=None,
url=('https://chromium-review.googlesource.com/a/projects/'
'project/parent'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, 'parent')
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_project_parent, 'a', 'b')
@mock.patch.object(requests.Session, 'request')
def test_query(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test',
with_labels=False, with_revisions=False,
owner='test@chromium.org')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test owner:test@chromium.org', 'o': ['MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_with_query_name(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test', query_name='pending_cls',
owner='1012155')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test query:pending_cls owner:1012155',
'o': ['CURRENT_REVISION', 'LABELS', 'MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.query, 'a', with_messages=False,
with_labels=False, with_revisions=False)
@mock.patch.object(requests.Session, 'request')
def test_get_issue(self, mock_method):
# By default, Gerrit doesn't return revisions data.
info_without_revisions = TEST_CHANGE_INFO.copy()
info_without_revisions.pop('revisions')
info_without_revisions.pop('current_revision')
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_without_revisions)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_without_revisions)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files(self, mock_method):
info_with_files = copy.deepcopy(TEST_CHANGE_INFO)
current = info_with_files['current_revision']
info_with_files['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_with_files)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True)
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'CURRENT_REVISION']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_with_files)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files_and_revisions(self, mock_method):
info = copy.deepcopy(TEST_CHANGE_INFO)
current = info['current_revision']
info['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True,
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_all_revisions(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(TEST_CHANGE_INFO)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, TEST_CHANGE_INFO)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_not_found(self, mock_method):
mock_method.return_value = _create_mock_return('Not found', 404)
result = self.gerrit.get_issue('unknown~branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'unknown~branch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_issue, 'issue')
@mock.patch.object(requests.Session, 'request')
def test_set_review(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id', 'Test message.',
{ 'Code-Review': 1 })
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_only_label(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id',
labels={ 'Code-Review': 1 }, notify='OWNER')
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD_LABELS_ONLY),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_review, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'status': 'MERGE'})), 200)
self.gerrit.submit_revision('change_id', 'current_revision_id')
mock_method.assert_called_once_with(
data=json.dumps({'wait_for_merge': True}),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/current_revision_id/submit'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_revision_conflict(self, mock_method):
mock_method.return_value = _create_mock_return(
'revision revision_id is not current revision', 409)
self.assertRaises(gerrit_api.RevisionConflictException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
| 39.064576 | 80 | 0.654277 |
import copy
import json
import mock
import requests
import tempfile
import time
import unittest
from infra.libs import gerrit_api
GERRIT_JSON_HEADER = ')]}\'\n'
HEADERS = {
'Accept': 'application/json',
'Accept-encoding': 'gzip',
'Authorization': 'Basic Z2l0LWNvbW1pdC1ib3RAY2hyb21pdW0ub3JnOnNlY3JldA==',
}
HEADERS_WITH_CONTENT_TYPE = HEADERS.copy()
HEADERS_WITH_CONTENT_TYPE['Content-Type'] = 'application/json;charset=UTF-8'
TEST_PAYLOAD = {
'labels': {
'Code-Review': 1,
},
'message': 'Test message.',
'notify': 'NONE',
}
TEST_PAYLOAD_LABELS_ONLY = {
'labels': {
'Code-Review': 1,
},
'notify': 'OWNER',
}
TEST_CHANGE_INFO = {
'id': 'project~branch~12345~change',
'change_id': 12345,
'created': '2014-02-11 12:14:28.135200000',
'updated': '2014-03-11 00:20:08.946000000',
'current_revision': 'THIRD',
'owner': {
'name': 'Some Person',
},
'revisions': {
'THIRD': {
'_number': 3,
},
'SECOND': {
'_number': 2,
},
'FIRST': {
'_number': 1,
},
},
'labels': {
'Commit-Queue': {
'recommended': { '_account_id': 1 }
},
'Test-Label': {
'disliked': { '_account_id' : 42 }
},
'Code-Review': {
'approved': { '_account_id': 2 }
},
},
'messages': [
{
'id': 1,
'author': 'test-user@test.org',
'date': '2014-02-11 12:10:14.311200000',
'message': 'MESSAGE1',
},
{
'id': 2,
'date': '2014-02-11 12:11:14.311200000',
'message': 'MESSAGE2',
'_revision_number': 2,
},
],
}
MOCK_AUTH=('git-commit-bot@chromium.org', 'secret')
def _create_mock_return(content, code):
  """Build a requests.Response carrying *content* with HTTP status *code*."""
  response = requests.Response()
  response.status_code = code
  # Assign the private payload directly; tests never stream real bodies.
  response._content = content
  return response
# TODO(akuegel): Add more test cases and remove the pragma no covers.
class GerritAgentTestCase(unittest.TestCase):
def setUp(self):
self.gerrit = gerrit_api.Gerrit('chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH))
self.gerrit_read_only = gerrit_api.Gerrit(
'chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH),
read_only=True)
@mock.patch.object(requests.Session, 'request')
def test_request_no_leading_slash(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s[]' % GERRIT_JSON_HEADER, 200)
result = self.gerrit._request(method='GET',
request_path='changes/?q=query:no_results')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'?q=query:no_results'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, (200, []))
@mock.patch.object(gerrit_api.Gerrit, '_sleep')
@mock.patch.object(time, 'time')
@mock.patch.object(requests.Session, 'request')
def test_request_throttled(self, mock_method, time_mock_method, sleep_mock):
gerrit_throttled = gerrit_api.Gerrit('chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH),
0.1)
mock_method.return_value = _create_mock_return(None, 404)
time_mock_method.return_value = 100
gerrit_throttled._request(method='GET', request_path='/accounts/self')
# Call it twice to test the throttling.
gerrit_throttled._request(method='GET', request_path='/accounts/self')
sleep_mock.assert_called_once_with(0)
time_mock_method.return_value = 101
# Call it again after exceeding the throttle to cover the other branch.
gerrit_throttled._request(method='GET', request_path='/accounts/self')
@mock.patch.object(requests.Session, 'request')
def test_get_account(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s{"_account_id":1000096,"name":"John Doe","email":'
'"john.doe@test.com","username":"john"}') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.get_account('self')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url='https://chromium-review.googlesource.com/a/accounts/self',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = {
'_account_id': 1000096,
'name': 'John Doe',
'email': 'john.doe@test.com',
'username': 'john'
}
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_get_account_404(self, mock_method):
mock_method.return_value = _create_mock_return(None, 404)
result = self.gerrit.get_account('does.not@exist.com')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com'
'/a/accounts/does.not@exist.com'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_account_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 201)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_account, 'self')
@mock.patch.object(requests.Session, 'request')
def test_list_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.list_group_members('test-group')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_list_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.list_group_members, 'test-group')
def test_list_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.list_group_members, 'a/b/c')
@mock.patch.object(requests.Session, 'request')
def test_add_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
members = ['jane.roe@example.com']
payload = { 'members': members }
result = self.gerrit.add_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.add'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_add_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.add_group_members, 'test-group', ['a@b.com'])
def test_add_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.add_group_members, 'a/b/c', [])
def test_add_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.add_group_members,
'test-group', ['a@b.com'])
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
204)
members = ['jane.roe@example.com']
payload = { 'members': members }
result = self.gerrit.delete_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.delete'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(
gerrit_api.UnexpectedResponseException,
self.gerrit.delete_group_members, 'test-group', ['a@b.com'])
def test_delete_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.delete_group_members, 'a/b/c', [])
def test_delete_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.delete_group_members,
'test-group', ['a@b.com'])
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s"parent"' % GERRIT_JSON_HEADER, 200)
result = self.gerrit.set_project_parent('project', 'parent')
payload = {
'parent': 'parent',
'commit_message': 'Changing parent project to parent'
}
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='PUT',
params=None,
url=('https://chromium-review.googlesource.com/a/projects/'
'project/parent'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, 'parent')
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_project_parent, 'a', 'b')
@mock.patch.object(requests.Session, 'request')
def test_query(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test',
with_labels=False, with_revisions=False,
owner='test@chromium.org')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test owner:test@chromium.org', 'o': ['MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_with_query_name(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test', query_name='pending_cls',
owner='1012155')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test query:pending_cls owner:1012155',
'o': ['CURRENT_REVISION', 'LABELS', 'MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.query, 'a', with_messages=False,
with_labels=False, with_revisions=False)
@mock.patch.object(requests.Session, 'request')
def test_get_issue(self, mock_method):
# By default, Gerrit doesn't return revisions data.
info_without_revisions = TEST_CHANGE_INFO.copy()
info_without_revisions.pop('revisions')
info_without_revisions.pop('current_revision')
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_without_revisions)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_without_revisions)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files(self, mock_method):
info_with_files = copy.deepcopy(TEST_CHANGE_INFO)
current = info_with_files['current_revision']
info_with_files['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_with_files)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True)
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'CURRENT_REVISION']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_with_files)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files_and_revisions(self, mock_method):
info = copy.deepcopy(TEST_CHANGE_INFO)
current = info['current_revision']
info['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True,
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_all_revisions(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(TEST_CHANGE_INFO)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, TEST_CHANGE_INFO)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_not_found(self, mock_method):
mock_method.return_value = _create_mock_return('Not found', 404)
result = self.gerrit.get_issue('unknown~branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'unknown~branch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_issue, 'issue')
@mock.patch.object(requests.Session, 'request')
def test_set_review(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id', 'Test message.',
{ 'Code-Review': 1 })
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_only_label(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id',
labels={ 'Code-Review': 1 }, notify='OWNER')
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD_LABELS_ONLY),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_review, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'status': 'MERGE'})), 200)
self.gerrit.submit_revision('change_id', 'current_revision_id')
mock_method.assert_called_once_with(
data=json.dumps({'wait_for_merge': True}),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/current_revision_id/submit'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_revision_conflict(self, mock_method):
mock_method.return_value = _create_mock_return(
'revision revision_id is not current revision', 409)
self.assertRaises(gerrit_api.RevisionConflictException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
| true | true |
f724d0f2012370079322010867b41194ad671123 | 330 | py | Python | Python-3/basic_examples/strings/trim-string.py | ghiloufibelgacem/jornaldev | b9b27f9f7da595892520314b4ed1d2675556310a | [
"MIT"
] | 1,139 | 2018-05-09T11:54:36.000Z | 2022-03-31T06:52:50.000Z | Python-3/basic_examples/strings/trim-string.py | ghiloufibelgacem/jornaldev | b9b27f9f7da595892520314b4ed1d2675556310a | [
"MIT"
] | 56 | 2018-06-20T03:52:53.000Z | 2022-02-09T22:57:41.000Z | Python-3/basic_examples/strings/trim-string.py | ghiloufibelgacem/jornaldev | b9b27f9f7da595892520314b4ed1d2675556310a | [
"MIT"
] | 2,058 | 2018-05-09T09:32:17.000Z | 2022-03-29T13:19:42.000Z | s1 = ' abc '
print("String ='{}'".format(s1))
print("After Removing Leading Whitespaces String ='{}'".format(s1.lstrip()))
print("After Removing Trailing Whitespaces String ='{}'".format(s1.rstrip()))
print("After Trimming Whitespaces String ='{}'".format(s1.strip()))
# strip() also removes newlines and tabs at both ends
s1 = ' X\n Y \nZ \t'
print(s1)
print(s1.strip())
| 19.411765 | 71 | 0.633333 | s1 = ' abc '
print(f'String =\'{s1}\'')
print(f'After Removing Leading Whitespaces String =\'{s1.lstrip()}\'')
print(f'After Removing Trailing Whitespaces String =\'{s1.rstrip()}\'')
print(f'After Trimming Whitespaces String =\'{s1.strip()}\'')
s1 = ' X\n Y \nZ \t'
print(s1)
print(s1.strip())
| true | true |
f724d12f6d6351caa87e074ea046e25613b6fe8c | 413 | py | Python | Task1E.py | ginnylaw/138-floodwarningsystem | dc9b674c5517761904062c5b35729d8f14504c48 | [
"MIT"
] | null | null | null | Task1E.py | ginnylaw/138-floodwarningsystem | dc9b674c5517761904062c5b35729d8f14504c48 | [
"MIT"
] | 1 | 2022-01-21T22:07:02.000Z | 2022-01-22T11:19:31.000Z | Task1E.py | ginnylaw/138-floodwarningsystem | dc9b674c5517761904062c5b35729d8f14504c48 | [
"MIT"
] | null | null | null | # Not Copyright (¬C) 2022 Greg S. Kurzepa
from floodsystem.geo import rivers_by_station_number
from floodsystem.stationdata import build_station_list
def run():
    """Requirements for Task 1E: print the rivers with the most stations."""
    stations = build_station_list()
    print(rivers_by_station_number(stations, 9))
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    print("*** Task 1E: CUED Part IA Flood Warning System ***")
    run()
from floodsystem.geo import rivers_by_station_number
from floodsystem.stationdata import build_station_list
def run():
station_list = build_station_list()
output = rivers_by_station_number(station_list, 9)
print(output)
if __name__ == "__main__":
print("*** Task 1E: CUED Part IA Flood Warning System ***")
run() | true | true |
f724d145f5fb4bdcfe48b20384224152e82d9a51 | 127 | py | Python | oss4blog/__init__.py | JianxunRao/oss4blog | 9e328ad5d2bc23806ef1c4d149f0bcc916674d03 | [
"MIT"
] | 3 | 2019-01-02T03:00:17.000Z | 2021-06-06T02:00:44.000Z | oss4blog/__init__.py | JianxunRao/oss4blog | 9e328ad5d2bc23806ef1c4d149f0bcc916674d03 | [
"MIT"
] | null | null | null | oss4blog/__init__.py | JianxunRao/oss4blog | 9e328ad5d2bc23806ef1c4d149f0bcc916674d03 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/2/5 0005 上午 8:56
# @Author : Trojx
# @File : __init__.py.py | 25.4 | 34 | 0.559055 | true | true | |
f724d19652f09efe12713994a7c76259c5afea06 | 3,189 | py | Python | ParlAI/parlai/tasks/mutualfriends/agents.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 163 | 2019-06-23T14:07:57.000Z | 2022-02-25T23:06:07.000Z | ParlAI/parlai/tasks/mutualfriends/agents.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 8 | 2019-07-24T12:41:31.000Z | 2022-02-10T00:17:20.000Z | ParlAI/parlai/tasks/mutualfriends/agents.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 31 | 2019-06-26T01:21:07.000Z | 2021-09-06T17:23:24.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import DialogTeacher
from .build import build
import json
import os
class DefaultTeacher(DialogTeacher):
    """MutualFriends dataset.

    Streams each logged conversation as alternating (text, labels) pairs.
    The first teacher turn additionally shows the agent its private list
    of friends; the final pair carries the episode outcome.
    """

    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        build(opt)
        if not opt['datatype'].startswith('train'):
            raise RuntimeError('MutualFriends only has a training set.')
        opt['datafile'] = os.path.join(opt['datapath'], 'MutualFriends', 'data.json')
        self.id = 'mutualfriends'
        super().__init__(opt, shared)

    def act(self):
        """Use DialogTeacher act but set id to "Teacher" for intro message."""
        reply = super().act()
        if reply.get('text', '').startswith('You have the following friends'):
            reply['id'] = 'Teacher'
        return reply

    def setup_data(self, path):
        """Load json data of conversations and yield dialogue pairs.

        Consecutive events by the same agent are merged into one turn;
        dict-valued events (friend selections) are rendered as
        ``SELECT(k=v, ...)`` strings.
        """
        print('loading: ' + path)
        with open(path) as data_file:
            self.loaded_data = json.load(data_file)
        for ex in self.loaded_data:
            if len(ex['events']) > 0:
                # TODO: add reverse conversation as well
                curr_agent = ex['events'][0]['agent']
                # Synthetic opening turn: show this agent its own KB.
                conversation = [
                    (
                        'You have the following friends:\n'
                        + '\n'.join(
                            ', '.join('{}={}'.format(k, v) for k, v in person.items())
                            for person in ex['scenario']['kbs'][int(curr_agent)]
                        )
                        + '\nTry to find out which friend the other person has in common.'
                    )
                ]
                curr = ''
                for event in ex['events']:
                    msg = event['data']
                    if isinstance(msg, dict):
                        # A selection event: render the chosen friend.
                        msg = 'SELECT({})'.format(
                            ', '.join('{}={}'.format(k, v) for k, v in msg.items())
                        )
                    next_agent = event['agent']
                    if curr_agent == next_agent:
                        # Same speaker: fold into the current turn.
                        curr += '\n' + msg
                        curr = curr.strip()
                    else:
                        conversation.append(curr)
                        curr = msg
                    curr_agent = next_agent
                conversation.append(curr)
                # Emit (text, [label]) pairs; attach the episode outcome to
                # the final pair (or to a lone trailing text).
                for i in range(0, len(conversation), 2):
                    if i + 1 < len(conversation) - 1:
                        yield (conversation[i], [conversation[i + 1]]), i == 0
                    elif i + 1 == len(conversation) - 1:
                        yield (
                            (conversation[i], [conversation[i + 1]], ex['outcome']),
                            False,
                        )
                    else:
                        yield (conversation[i], None, ex['outcome']), False
| 39.37037 | 90 | 0.461587 |
from parlai.core.teachers import DialogTeacher
from .build import build
import json
import os
class DefaultTeacher(DialogTeacher):
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
if not opt['datatype'].startswith('train'):
raise RuntimeError('MutualFriends only has a training set.')
opt['datafile'] = os.path.join(opt['datapath'], 'MutualFriends', 'data.json')
self.id = 'mutualfriends'
super().__init__(opt, shared)
def act(self):
reply = super().act()
if reply.get('text', '').startswith('You have the following friends'):
reply['id'] = 'Teacher'
return reply
def setup_data(self, path):
print('loading: ' + path)
with open(path) as data_file:
self.loaded_data = json.load(data_file)
for ex in self.loaded_data:
if len(ex['events']) > 0:
curr_agent = ex['events'][0]['agent']
conversation = [
(
'You have the following friends:\n'
+ '\n'.join(
', '.join('{}={}'.format(k, v) for k, v in person.items())
for person in ex['scenario']['kbs'][int(curr_agent)]
)
+ '\nTry to find out which friend the other person has in common.'
)
]
curr = ''
idx = 0
while idx < len(ex['events']):
msg = ex['events'][idx]['data']
if type(msg) == dict:
msg = 'SELECT({})'.format(
', '.join('{}={}'.format(k, v) for k, v in msg.items())
)
next_agent = ex['events'][idx]['agent']
if curr_agent == next_agent:
curr += '\n' + msg
curr = curr.strip()
else:
conversation.append(curr)
curr = msg
curr_agent = next_agent
idx += 1
conversation.append(curr)
for i in range(0, len(conversation), 2):
if i + 1 < len(conversation) - 1:
yield (conversation[i], [conversation[i + 1]]), i == 0
elif i + 1 == len(conversation) - 1:
yield (
(conversation[i], [conversation[i + 1]], ex['outcome']),
False,
)
else:
yield (conversation[i], None, ex['outcome']), False
| true | true |
f724d1efb6cc2a309577cdfab02d22ed387da3a1 | 6,306 | py | Python | py-polars/polars/utils.py | JakobGM/polars | fe10d4a180e59e5e34f4ab17303f12f1cd64e6c8 | [
"MIT"
] | null | null | null | py-polars/polars/utils.py | JakobGM/polars | fe10d4a180e59e5e34f4ab17303f12f1cd64e6c8 | [
"MIT"
] | null | null | null | py-polars/polars/utils.py | JakobGM/polars | fe10d4a180e59e5e34f4ab17303f12f1cd64e6c8 | [
"MIT"
] | null | null | null | import ctypes
import os
import sys
from datetime import date, datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from polars.datatypes import DataType, Date, Datetime
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard # pragma: no cover
def _process_null_values(
null_values: Union[None, str, List[str], Dict[str, str]] = None,
) -> Union[None, str, List[str], List[Tuple[str, str]]]:
if isinstance(null_values, dict):
return list(null_values.items())
else:
return null_values
# https://stackoverflow.com/questions/4355524/getting-data-from-ctypes-array-into-numpy
def _ptr_to_numpy(ptr: int, len: int, ptr_type: Any) -> np.ndarray:
"""
Parameters
----------
ptr
C/Rust ptr casted to usize.
len
Length of the array values.
ptr_type
Example:
f32: ctypes.c_float)
Returns
-------
View of memory block as numpy array.
"""
ptr_ctype = ctypes.cast(ptr, ctypes.POINTER(ptr_type))
return np.ctypeslib.as_array(ptr_ctype, (len,))
def _timedelta_to_pl_duration(td: timedelta) -> str:
return f"{td.days}d{td.seconds}s{td.microseconds}us"
def in_nanoseconds_window(dt: datetime) -> bool:
    """True when *dt*'s year lies strictly between 1386 and 2554."""
    return dt.year in range(1387, 2554)
def timedelta_in_nanoseconds_window(td: timedelta) -> bool:
    """True when epoch + *td* still falls in the ns-timestamp year window."""
    anchored = datetime(1970, 1, 1) + td
    return 1386 < anchored.year < 2554
def _datetime_to_pl_timestamp(dt: datetime, tu: Optional[str]) -> int:
"""
Converts a python datetime to a timestamp in nanoseconds
"""
if tu == "ns":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e9)
elif tu == "us":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)
elif tu == "ms":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e3)
if tu is None:
# python has us precision
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)
else:
raise ValueError("expected on of {'ns', 'ms'}")
def _timedelta_to_pl_timedelta(td: timedelta, tu: Optional[str] = None) -> int:
if tu == "ns":
return int(td.total_seconds() * 1e9)
elif tu == "us":
return int(td.total_seconds() * 1e6)
elif tu == "ms":
return int(td.total_seconds() * 1e3)
if tu is None:
if timedelta_in_nanoseconds_window(td):
return int(td.total_seconds() * 1e9)
else:
return int(td.total_seconds() * 1e3)
else:
raise ValueError("expected one of {'ns', 'us, 'ms'}")
def _date_to_pl_date(d: date) -> int:
dt = datetime.combine(d, datetime.min.time()).replace(tzinfo=timezone.utc)
return int(dt.timestamp()) // (3600 * 24)
def is_str_sequence(
val: Sequence[object], allow_str: bool = False
) -> TypeGuard[Sequence[str]]:
"""
Checks that `val` is a sequence of strings. Note that a single string is a sequence of strings
by definition, use `allow_str=False` to return False on a single string
"""
if (not allow_str) and isinstance(val, str):
return False
return _is_iterable_of(val, Sequence, str)
def is_int_sequence(val: Sequence[object]) -> TypeGuard[Sequence[int]]:
return _is_iterable_of(val, Sequence, int)
def _is_iterable_of(val: Iterable, itertype: Type, eltype: Type) -> bool:
return isinstance(val, itertype) and all(isinstance(x, eltype) for x in val)
def range_to_slice(rng: range) -> slice:
    """Return the slice equivalent to *rng* (step left as None when 1)."""
    # maybe we can slice instead of take by indices
    return slice(rng.start, rng.stop, rng.step if rng.step != 1 else None)
def handle_projection_columns(
    columns: Optional[Union[List[str], List[int]]]
) -> Tuple[Optional[List[int]], Optional[List[str]]]:
    """Split a ``columns`` argument into (integer projection, column names).

    Integer lists become a positional projection; string lists are kept
    as names; an empty/None argument yields no projection.

    Raises
    ------
    ValueError
        If *columns* is neither all-int nor all-str.
    """
    if not columns:
        return None, columns  # type: ignore
    if is_int_sequence(columns):
        return columns, None  # type: ignore
    if is_str_sequence(columns):
        return None, columns  # type: ignore
    raise ValueError(
        "columns arg should contain a list of all integers or all strings values."
    )
def _to_python_timedelta(
value: Union[int, float], tu: Optional[str] = "ns"
) -> timedelta:
if tu == "ns":
return timedelta(microseconds=value // 1e3)
elif tu == "us":
return timedelta(microseconds=value)
elif tu == "ms":
return timedelta(milliseconds=value)
else:
raise ValueError(f"time unit: {tu} not expected")
def _prepare_row_count_args(
row_count_name: Optional[str] = None,
row_count_offset: int = 0,
) -> Optional[Tuple[str, int]]:
if row_count_name is not None:
return (row_count_name, row_count_offset)
else:
return None
EPOCH = datetime(1970, 1, 1).replace(tzinfo=None)
def _to_python_datetime(
    value: Union[int, float], dtype: Type[DataType], tu: Optional[str] = "ns"
) -> Union[date, datetime]:
    """Convert a raw integer timestamp of *dtype* into a date/datetime.

    Raises
    ------
    ValueError
        For an unsupported time unit.
    """
    if dtype == Date:
        # Days since epoch -> calendar date. Go through UTC so the result
        # does not depend on the local timezone.
        return datetime.utcfromtimestamp(value * 24 * 3600).date()
    if dtype == Datetime:
        if tu == "ns":
            # nanoseconds -> microseconds offset from the epoch
            return EPOCH + timedelta(microseconds=value / 1000)
        if tu == "us":
            return EPOCH + timedelta(microseconds=value)
        if tu == "ms":
            # milliseconds -> seconds
            return datetime.utcfromtimestamp(value / 1_000)
        raise ValueError(f"time unit: {tu} not expected")
    raise NotImplementedError  # pragma: no cover
def _in_notebook() -> bool:
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config: # pragma: no cover
return False
except ImportError:
return False
except AttributeError:
return False
return True
def format_path(path: Union[str, Path]) -> str:
    """
    Returns a string path, expanding the home directory ("~") if present.
    """
    return os.path.expanduser(path)
| 29.605634 | 98 | 0.64288 | import ctypes
import os
import sys
from datetime import date, datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from polars.datatypes import DataType, Date, Datetime
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard
def _process_null_values(
null_values: Union[None, str, List[str], Dict[str, str]] = None,
) -> Union[None, str, List[str], List[Tuple[str, str]]]:
if isinstance(null_values, dict):
return list(null_values.items())
else:
return null_values
def _ptr_to_numpy(ptr: int, len: int, ptr_type: Any) -> np.ndarray:
ptr_ctype = ctypes.cast(ptr, ctypes.POINTER(ptr_type))
return np.ctypeslib.as_array(ptr_ctype, (len,))
def _timedelta_to_pl_duration(td: timedelta) -> str:
return f"{td.days}d{td.seconds}s{td.microseconds}us"
def in_nanoseconds_window(dt: datetime) -> bool:
return 1386 < dt.year < 2554
def timedelta_in_nanoseconds_window(td: timedelta) -> bool:
    """True when epoch + *td* still falls inside the i64 nanosecond window."""
    epoch = datetime(1970, 1, 1)
    return in_nanoseconds_window(epoch + td)
def _datetime_to_pl_timestamp(dt: datetime, tu: Optional[str]) -> int:
if tu == "ns":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e9)
elif tu == "us":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)
elif tu == "ms":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e3)
if tu is None:
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)
else:
raise ValueError("expected on of {'ns', 'ms'}")
def _timedelta_to_pl_timedelta(td: timedelta, tu: Optional[str] = None) -> int:
if tu == "ns":
return int(td.total_seconds() * 1e9)
elif tu == "us":
return int(td.total_seconds() * 1e6)
elif tu == "ms":
return int(td.total_seconds() * 1e3)
if tu is None:
if timedelta_in_nanoseconds_window(td):
return int(td.total_seconds() * 1e9)
else:
return int(td.total_seconds() * 1e3)
else:
raise ValueError("expected one of {'ns', 'us, 'ms'}")
def _date_to_pl_date(d: date) -> int:
dt = datetime.combine(d, datetime.min.time()).replace(tzinfo=timezone.utc)
return int(dt.timestamp()) // (3600 * 24)
def is_str_sequence(
    val: Sequence[object], allow_str: bool = False
) -> TypeGuard[Sequence[str]]:
    """Type-guard: is *val* a sequence whose elements are all ``str``?

    A plain string is itself a sequence of strings; it is rejected unless
    *allow_str* is True.
    """
    if isinstance(val, str) and not allow_str:
        return False
    return _is_iterable_of(val, Sequence, str)
def is_int_sequence(val: Sequence[object]) -> TypeGuard[Sequence[int]]:
    """Type-guard: is *val* a sequence whose elements are all ``int``?"""
    return _is_iterable_of(val, Sequence, int)
def _is_iterable_of(val: Iterable, itertype: Type, eltype: Type) -> bool:
return isinstance(val, itertype) and all(isinstance(x, eltype) for x in val)
def range_to_slice(rng: range) -> slice:
    """Convert a range into the equivalent slice; a unit step is mapped to
    ``None`` so downstream code can recognise contiguous selections."""
    step: Optional[int] = None if rng.step == 1 else rng.step
    return slice(rng.start, rng.stop, step)
def handle_projection_columns(
    columns: Optional[Union[List[str], List[int]]]
) -> Tuple[Optional[List[int]], Optional[List[str]]]:
    """Split a ``columns`` argument into ``(projection, column_names)``.

    An all-integer list becomes a positional projection (names -> None); an
    all-string list passes through unchanged; mixed content raises.
    """
    if not columns:
        return None, columns  # type: ignore
    if is_int_sequence(columns):
        return columns, None  # type: ignore
    if not is_str_sequence(columns):
        raise ValueError(
            "columns arg should contain a list of all integers or all strings values."
        )
    return None, columns  # type: ignore
def _to_python_timedelta(
value: Union[int, float], tu: Optional[str] = "ns"
) -> timedelta:
if tu == "ns":
return timedelta(microseconds=value // 1e3)
elif tu == "us":
return timedelta(microseconds=value)
elif tu == "ms":
return timedelta(milliseconds=value)
else:
raise ValueError(f"time unit: {tu} not expected")
def _prepare_row_count_args(
row_count_name: Optional[str] = None,
row_count_offset: int = 0,
) -> Optional[Tuple[str, int]]:
if row_count_name is not None:
return (row_count_name, row_count_offset)
else:
return None
# Naive (timezone-less) Unix epoch; the datetime conversions below add
# microsecond offsets to this reference point.
EPOCH = datetime(1970, 1, 1).replace(tzinfo=None)
def _to_python_datetime(
    value: Union[int, float], dtype: Type[DataType], tu: Optional[str] = "ns"
) -> Union[date, datetime]:
    """Convert a physical integer *value* into a Python date/datetime.

    For ``Date`` the value is days since the Unix epoch; for ``Datetime`` it
    is in time unit *tu* ("ns", "us" or "ms").

    Raises
    ------
    ValueError
        If *tu* is not a supported time unit.
    NotImplementedError
        For any other dtype.
    """
    if dtype == Date:
        # days -> seconds; convert via UTC so the result does not depend on
        # the local timezone. (datetime.utcfromtimestamp is deprecated since
        # Python 3.12, hence fromtimestamp with an explicit UTC tz.)
        return datetime.fromtimestamp(value * 3600 * 24, tz=timezone.utc).date()
    elif dtype == Datetime:
        if tu == "ns":
            # nanoseconds -> microseconds (timedelta resolution)
            return EPOCH + timedelta(microseconds=value / 1000)
        elif tu == "us":
            return EPOCH + timedelta(microseconds=value)
        elif tu == "ms":
            # naive UTC datetime, matching the old utcfromtimestamp behaviour
            return datetime.fromtimestamp(value / 1_000, tz=timezone.utc).replace(tzinfo=None)
        else:
            raise ValueError(f"time unit: {tu} not expected")
    else:
        raise NotImplementedError  # pragma: no cover
def _in_notebook() -> bool:
    """Best-effort check for running inside a Jupyter/IPython kernel."""
    try:
        from IPython import get_ipython
        # A kernel app is only registered when running under a notebook kernel.
        if "IPKernelApp" not in get_ipython().config:  # pragma: no cover
            return False
    except ImportError:
        # IPython is not installed at all.
        return False
    except AttributeError:
        # get_ipython() returned None: plain Python interpreter.
        return False
    return True
def format_path(path: Union[str, Path]) -> str:
    """Return *path* as a string, expanding a leading ``~`` to the home dir."""
    return os.path.expanduser(path)
| true | true |
f724d251d69499fc6e1ec87430fba69964909b5d | 2,310 | py | Python | tests/test_datasets/test_dataset_wrapper.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | [
"Apache-2.0"
] | 1 | 2022-02-13T12:27:40.000Z | 2022-02-13T12:27:40.000Z | tests/test_datasets/test_dataset_wrapper.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | [
"Apache-2.0"
] | null | null | null | tests/test_datasets/test_dataset_wrapper.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv import Config
from mmpose.datasets.builder import build_dataset
def test_concat_dataset():
    """Build a ConcatDataset in the three supported ways and verify that each
    result contains twice the base dataset."""
    # build COCO-like dataset config
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco.py').dataset_info
    # 17 COCO keypoints; every channel is used for both training and inference
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])
    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )
    dataset_cfg = dict(
        type='TopDownCocoDataset',
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info)
    dataset = build_dataset(dataset_cfg)
    # Case 1: build ConcatDataset explicitly
    concat_dataset_cfg = dict(
        type='ConcatDataset', datasets=[dataset_cfg, dataset_cfg])
    concat_dataset = build_dataset(concat_dataset_cfg)
    assert len(concat_dataset) == 2 * len(dataset)
    # Case 2: build ConcatDataset from cfg sequence
    concat_dataset = build_dataset([dataset_cfg, dataset_cfg])
    assert len(concat_dataset) == 2 * len(dataset)
    # Case 3: build ConcatDataset from ann_file sequence
    concat_dataset_cfg = dataset_cfg.copy()
    # Turning these keys into 2-element lists makes the cfg describe 2 datasets
    for key in ['ann_file', 'type', 'img_prefix', 'dataset_info']:
        val = concat_dataset_cfg[key]
        concat_dataset_cfg[key] = [val] * 2
    for key in ['num_joints', 'dataset_channel']:
        val = concat_dataset_cfg['data_cfg'][key]
        concat_dataset_cfg['data_cfg'][key] = [val] * 2
    concat_dataset = build_dataset(concat_dataset_cfg)
    assert len(concat_dataset) == 2 * len(dataset)
| 33.970588 | 71 | 0.646753 |
from mmcv import Config
from mmpose.datasets.builder import build_dataset
def test_concat_dataset():
    """Build a ConcatDataset in the three supported ways; each must contain
    twice the length of the base dataset."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco.py').dataset_info
    # COCO layout: 17 keypoints, all used for training and inference.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])
    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )
    dataset_cfg = dict(
        type='TopDownCocoDataset',
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info)
    dataset = build_dataset(dataset_cfg)
    # Case 1: an explicit ConcatDataset config.
    concat_dataset_cfg = dict(
        type='ConcatDataset', datasets=[dataset_cfg, dataset_cfg])
    concat_dataset = build_dataset(concat_dataset_cfg)
    assert len(concat_dataset) == 2 * len(dataset)
    # Case 2: a plain sequence of dataset configs.
    concat_dataset = build_dataset([dataset_cfg, dataset_cfg])
    assert len(concat_dataset) == 2 * len(dataset)
    # Case 3: one config whose per-dataset fields are turned into lists.
    concat_dataset_cfg = dataset_cfg.copy()
    for key in ['ann_file', 'type', 'img_prefix', 'dataset_info']:
        val = concat_dataset_cfg[key]
        concat_dataset_cfg[key] = [val] * 2
    for key in ['num_joints', 'dataset_channel']:
        val = concat_dataset_cfg['data_cfg'][key]
        concat_dataset_cfg['data_cfg'][key] = [val] * 2
    concat_dataset = build_dataset(concat_dataset_cfg)
    assert len(concat_dataset) == 2 * len(dataset)
| true | true |
f724d3be4fab7267380619189339e046a243a317 | 741 | py | Python | face_recon_deform/PhotoAvatarLib_exe/run.py | halfjoe/3D-Portrait-Stylization | ccf0edd5cf7764d67d2740aa0e2cd18cc503c937 | [
"MIT"
] | 38 | 2022-01-12T14:17:25.000Z | 2022-03-23T06:34:23.000Z | face_recon_deform/PhotoAvatarLib_exe/run.py | halfjoe/3D-Portrait-Stylization | ccf0edd5cf7764d67d2740aa0e2cd18cc503c937 | [
"MIT"
] | 5 | 2022-01-19T12:14:45.000Z | 2022-03-22T15:59:12.000Z | face_recon_deform/PhotoAvatarLib_exe/run.py | halfjoe/3D-Portrait-Stylization | ccf0edd5cf7764d67d2740aa0e2cd18cc503c937 | [
"MIT"
] | 6 | 2022-01-14T06:59:37.000Z | 2022-03-15T03:58:54.000Z | import os
# Batch driver: run PhotoAvatarLib on every .jpg in upload/ and patch the
# generated OBJ/MTL files so the OBJ references its material and texture.
for image_name in os.listdir("upload"):
    if not image_name.endswith(".jpg"):
        continue
    base = image_name.rsplit('.', 1)[0]  # file name without extension
    print(base)
    os.system('PhotoAvatarLib.exe ' + base)
    # Write a minimal MTL file pointing at the generated face texture.
    with open(os.path.join('result', base + '.mtl'), "w") as fp:
        fp.write('newmtl material_1\nmap_Kd %s_face.jpg' % base)
    # Prepend the mtllib/usemtl header to the fitted OBJ (read, then rewrite).
    obj_path = os.path.join('result', base + '_face_fit_ortho.obj')
    with open(obj_path, "r") as fp:
        fstr = fp.read()
    with open(obj_path, "w") as fp:
        fp.write('mtllib %s.mtl\nusemtl material_1\n' % base)
        fp.write(fstr)
| 35.285714 | 95 | 0.522267 | import os
# For each uploaded .jpg: invoke the reconstruction exe, then fix up the
# emitted MTL/OBJ pair so the mesh links to its material/texture.
for entry in os.listdir("upload"):
    if not entry.endswith(".jpg"):
        continue
    stem = entry.rsplit('.', 1)[0]  # hoisted: was recomputed on every line
    print(stem)
    os.system('PhotoAvatarLib.exe ' + stem)
    with open(os.path.join('result', stem + '.mtl'), "w") as mtl_file:
        mtl_file.write('newmtl material_1\nmap_Kd %s_face.jpg' % stem)
    obj_file_path = os.path.join('result', stem + '_face_fit_ortho.obj')
    with open(obj_file_path, "r") as obj_file:
        obj_body = obj_file.read()
    with open(obj_file_path, "w") as obj_file:
        obj_file.write('mtllib %s.mtl\nusemtl material_1\n' % stem)
        obj_file.write(obj_body)
| true | true |
f724d762255165511edcd4f30973356a4b81b6a1 | 964 | py | Python | tests/test_main.py | thorgate/pyevr | 168f2e9459020212213ed0291882a285ebb53839 | [
"MIT"
] | 3 | 2020-04-18T19:45:51.000Z | 2022-03-01T19:48:11.000Z | tests/test_main.py | thorgate/pyevr | 168f2e9459020212213ed0291882a285ebb53839 | [
"MIT"
] | 39 | 2019-11-16T01:35:35.000Z | 2021-11-18T12:58:41.000Z | tests/test_main.py | thorgate/pyevr | 168f2e9459020212213ed0291882a285ebb53839 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pyevr.main`."""
import pytest
from click.testing import CliRunner
from pyevr.main import main
@pytest.fixture
def response():
    """Sample pytest fixture (template placeholder; returns nothing yet).

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument.

    Placeholder from the cookiecutter template; the fixture is unused so far.
    """
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
    """Exercise the CLI entry point with and without ``--help``."""
    cli = CliRunner()
    # A bare invocation should succeed and mention the entry point.
    invocation = cli.invoke(main)
    assert invocation.exit_code == 0
    assert 'pyevr.cli.main' in invocation.output
    # --help should also succeed and print the usage footer.
    help_invocation = cli.invoke(main, ['--help'])
    assert help_invocation.exit_code == 0
    assert '--help Show this message and exit.' in help_invocation.output
| 25.368421 | 78 | 0.690871 |
import pytest
from click.testing import CliRunner
from pyevr.main import main
@pytest.fixture
def response():
    """Sample pytest fixture (placeholder; performs no setup)."""
def test_content(response):
    """Placeholder test consuming the ``response`` fixture."""
def test_command_line_interface():
    """Test the CLI with and without --help."""
    runner = CliRunner()
    # Bare invocation: should exit cleanly and mention the entry point.
    result = runner.invoke(main)
    assert result.exit_code == 0
    assert 'pyevr.cli.main' in result.output
    # --help: should exit cleanly and print the usage footer.
    help_result = runner.invoke(main, ['--help'])
    assert help_result.exit_code == 0
    assert '--help Show this message and exit.' in help_result.output
| true | true |
f724d7d23d4236fb0d0aeead2ccfc8a44b4b705c | 17,325 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_user.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_user.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_user.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: preview status, network-team supported.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_user
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage the aggregate of local users on Cisco IOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
notes:
- Tested against IOS 15.6
options:
aggregate:
description:
- The set of username objects to be configured on the remote
Cisco IOS device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the Cisco IOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
configured_password:
description:
- The password to be configured on the Cisco IOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
password_type:
description:
- This argument determines whether a 'password' or 'secret' will be
configured.
default: secret
choices: ['secret', 'password']
version_added: "2.8"
hashed_password:
description:
- This option allows configuring hashed passwords on Cisco IOS devices.
suboptions:
type:
description:
- Specifies the type of hash (e.g., 5 for MD5, 8 for PBKDF2, etc.)
- For this to work, the device needs to support the desired hash type
type: int
required: True
value:
description:
- The actual hashed password to be configured on the device
required: True
version_added: "2.8"
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
view:
description:
- Configures the view for the username in the
device running configuration. The argument accepts a string value
defining the view name. This argument does not check if the view
has been configured on the device.
aliases: ['role']
sshkey:
description:
- Specifies one or more SSH public key(s) to configure
for the given username.
- This argument accepts a valid SSH key value.
version_added: "2.7"
nopassword:
description:
- Defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password.
type: bool
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: create a new user
ios_user:
name: ansible
nopassword: True
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: create a new user with multiple keys
ios_user:
name: ansible
sshkey:
- "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
- "{{ lookup('file', '~/path/to/public_key') }}"
state: present
- name: remove all users except admin
ios_user:
purge: yes
- name: remove all users except admin and these listed users
ios_user:
aggregate:
- name: testuser1
- name: testuser2
- name: testuser3
purge: yes
- name: set multiple users to privilege level 15
ios_user:
aggregate:
- name: netop
- name: netend
privilege: 15
state: present
- name: set user view/role
ios_user:
name: netop
view: network-operator
state: present
- name: Change Password for User netop
ios_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
- name: Aggregate of users
ios_user:
aggregate:
- name: ansibletest2
- name: ansibletest3
view: network-admin
- name: Add a user specifying password type
ios_user:
name: ansibletest4
configured_password: "{{ new_password }}"
password_type: password
- name: Add a user with MD5 hashed password
ios_user:
name: ansibletest5
hashed_password:
type: 5
value: $3$8JcDilcYgFZi.yz4ApaqkHG2.8/
- name: Delete users with aggregate
ios_user:
aggregate:
- name: ansibletest1
- name: ansibletest2
- name: ansibletest3
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
"""
import base64
import hashlib
import re
from copy import deepcopy
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
from ansible.module_utils.six import iteritems
def validate_privilege(value, module):
    """Abort the module run when a supplied privilege level is outside 1..15.

    Unset (None/0) values pass through untouched.
    """
    if not value:
        return
    if 1 <= value <= 15:
        return
    module.fail_json(msg='privilege must be between 1 and 15, got %s' % value)
def user_del_cmd(username):
    """Build the interactive 'no username' command descriptor; the device
    prompts for confirmation, which is answered with 'y'."""
    return dict(
        command='no username %s' % username,
        prompt='This operation will remove all username related configurations with same name',
        answer='y',
        newline=False,
    )
def sshkey_fingerprint(sshkey):
    """Replace an ssh public key's base64 body with its upper-case MD5 hex
    digest — IOS accepts this one-line fingerprint form (key-hash)."""
    if not sshkey:
        return None

    def digest(b64_body):
        return hashlib.md5(base64.b64decode(b64_body)).hexdigest().upper()

    if ' ' not in sshkey:
        # bare key body only; assume rsa type
        return 'ssh-rsa %s' % digest(sshkey)
    # "ssh-rsa AAA...== comment" form: hash only the middle (key) part
    parts = sshkey.split(' ')
    parts[1] = digest(parts[1])
    return ' '.join(parts)
def map_obj_to_commands(updates, module):
    """Translate (want, have) user pairs into IOS configuration commands.

    ``updates`` is the list produced by update_objects(); the return value
    mixes plain command strings and interactive command dicts.
    """
    commands = list()
    update_password = module.params['update_password']
    password_type = module.params['password_type']
    def needs_update(want, have, x):
        # Field x must be requested (truthy) and differ from the device value.
        return want.get(x) and (want.get(x) != have.get(x))
    def add(command, want, x):
        command.append('username %s %s' % (want['name'], x))
    def add_hashed_password(command, want, x):
        command.append('username %s secret %s %s' % (want['name'], x.get('type'),
                                                     x.get('value')))
    def add_ssh(command, want, x=None):
        # With keys: rewrite the user's pubkey-chain; without: remove it.
        command.append('ip ssh pubkey-chain')
        if x:
            command.append('username %s' % want['name'])
            for item in x:
                command.append('key-hash %s' % item)
            command.append('exit')
        else:
            command.append('no username %s' % want['name'])
        command.append('exit')
    for update in updates:
        want, have = update
        if want['state'] == 'absent':
            # Users with ssh keys are removed via the pubkey-chain instead.
            if have['sshkey']:
                add_ssh(commands, want)
            else:
                commands.append(user_del_cmd(want['name']))
        if needs_update(want, have, 'view'):
            add(commands, want, 'view %s' % want['view'])
        if needs_update(want, have, 'privilege'):
            add(commands, want, 'privilege %s' % want['privilege'])
        if needs_update(want, have, 'sshkey'):
            add_ssh(commands, want, want['sshkey'])
        if needs_update(want, have, 'configured_password'):
            if update_password == 'always' or not have:
                # Refuse to mix 'password' and 'secret' styles on one user.
                if have and password_type != have['password_type']:
                    module.fail_json(msg='Can not have both a user password and a user secret.' +
                                     ' Please choose one or the other.')
                add(commands, want, '%s %s' % (password_type, want['configured_password']))
        if needs_update(want, have, 'hashed_password'):
            add_hashed_password(commands, want, want['hashed_password'])
        if needs_update(want, have, 'nopassword'):
            if want['nopassword']:
                add(commands, want, 'nopassword')
            else:
                add(commands, want, user_del_cmd(want['name']))
    return commands
def parse_view(data):
    """Extract the view name from a 'username ... view <name>' config line,
    or None when no view is configured."""
    found = re.search(r'view (\S+)', data, re.M)
    return found.group(1) if found else None
def parse_sshkey(data, user):
    """Collect the key-hash values from *user*'s pubkey-chain block in the
    running config. Returns an empty list when the user has none."""
    block_pattern = r'username %s(\n\s+key-hash .+$)+' % user
    block = re.search(block_pattern, data, re.M)
    if not block:
        return []
    return re.findall(r'key-hash (\S+ \S+(?: .+)?)$', block.group(), re.M)
def parse_privilege(data):
    """Extract the numeric privilege level from a username config line,
    or None when absent."""
    found = re.search(r'privilege (\S+)', data, re.M)
    return int(found.group(1)) if found else None
def parse_password_type(data):
    """Return 'password' or 'secret' when the username config line sets one,
    else None.

    Assumes the running-config form '... {password|secret} <type> <value>',
    i.e. the keyword sits third from the end of the line.
    """
    # Fixes: no longer shadows the builtin `type`, splits only once, and
    # guards against short lines that previously raised IndexError on [-3].
    if not data:
        return None
    tokens = data.split()
    if len(tokens) >= 3 and tokens[-3] in ('password', 'secret'):
        return tokens[-3]
    return None
def map_config_to_obj(module):
    """Parse the device's 'section username' running-config into user dicts.

    Each dict mirrors the shape produced by map_params_to_obj() so the two
    can be compared field by field.
    """
    data = get_config(module, flags=['| section username'])
    # Match usernames at start of line or indented by two whitespace chars.
    match = re.findall(r'(?:^(?:u|\s{2}u))sername (\S+)', data, re.M)
    if not match:
        return list()
    instances = list()
    for user in set(match):
        # Gather every config line belonging to this user.
        regex = r'username %s .+$' % user
        cfg = re.findall(regex, data, re.M)
        cfg = '\n'.join(cfg)
        obj = {
            'name': user,
            'state': 'present',
            'nopassword': 'nopassword' in cfg,
            # Passwords cannot be read back from the device in clear text.
            'configured_password': None,
            'hashed_password': None,
            'password_type': parse_password_type(cfg),
            'sshkey': parse_sshkey(data, user),
            'privilege': parse_privilege(cfg),
            'view': parse_view(cfg)
        }
        instances.append(obj)
    return instances
def get_param_value(key, item, module):
    """Resolve *key* for one aggregate *item*, falling back to module.params.

    Item-level values are run through AnsibleModule's own type checkers and
    then through a module-level validate_<key>() hook when one exists.
    """
    # if key doesn't exist in the item, get it from module.params
    if not item.get(key):
        value = module.params[key]
    # if key does exist, do a type check on it to validate it
    else:
        value_type = module.argument_spec[key].get('type', 'str')
        type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
        type_checker(item[key])
        value = item[key]
    # validate the param value (if validator func exists)
    validator = globals().get('validate_%s' % key)
    if all((value, validator)):
        validator(value, module)
    return value
def map_params_to_obj(module):
    """Normalise the 'name'/'aggregate' module parameters into user dicts."""
    users = module.params['aggregate']
    if not users:
        # A bare purge run is allowed without naming any user.
        if not module.params['name'] and module.params['purge']:
            return list()
        elif not module.params['name']:
            module.fail_json(msg='username is required')
        else:
            aggregate = [{'name': module.params['name']}]
    else:
        aggregate = list()
        for item in users:
            # The aggregate accepts both bare names and per-user dicts.
            if not isinstance(item, dict):
                aggregate.append({'name': item})
            elif 'name' not in item:
                module.fail_json(msg='name is required')
            else:
                aggregate.append(item)
    objects = list()
    for item in aggregate:
        # Fill every field, falling back to the top-level module params.
        get_value = partial(get_param_value, item=item, module=module)
        item['configured_password'] = get_value('configured_password')
        item['hashed_password'] = get_value('hashed_password')
        item['nopassword'] = get_value('nopassword')
        item['privilege'] = get_value('privilege')
        item['view'] = get_value('view')
        item['sshkey'] = render_key_list(get_value('sshkey'))
        item['state'] = get_value('state')
        objects.append(item)
    return objects
def render_key_list(ssh_keys):
    """Fingerprint each ssh key in *ssh_keys*; None/empty input yields []."""
    if not ssh_keys:
        return []
    return [sshkey_fingerprint(key) for key in ssh_keys]
def update_objects(want, have):
    """Pair each desired user with its current device state when an update
    is required.

    New users (state 'present', not on the device) pair with {}. Existing
    users are appended once per differing truthy field.
    NOTE(review): several differing fields append the same pair repeatedly —
    looks intentional upstream, preserved as-is.
    """
    updates = list()
    for entry in want:
        current = next((h for h in have if h['name'] == entry['name']), None)
        if current is None and entry['state'] == 'present':
            updates.append((entry, {}))
        elif current:
            for field, desired in iteritems(entry):
                if desired and desired != current[field]:
                    updates.append((entry, current))
    return updates
def main():
    """Module entry point: build the argument spec, diff desired vs. device
    users and push the resulting commands.
    """
    hashed_password_spec = dict(
        type=dict(type='int', required=True),
        value=dict(no_log=True, required=True)
    )
    element_spec = dict(
        name=dict(),
        configured_password=dict(no_log=True),
        hashed_password=dict(no_log=True, type='dict', options=hashed_password_spec),
        nopassword=dict(type='bool'),
        update_password=dict(default='always', choices=['on_create', 'always']),
        password_type=dict(default='secret', choices=['secret', 'password']),
        privilege=dict(type='int'),
        view=dict(aliases=['role']),
        sshkey=dict(type='list'),
        state=dict(default='present', choices=['present', 'absent'])
    )
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection']),
        purge=dict(type='bool', default=False)
    )
    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)
    mutually_exclusive = [('name', 'aggregate'), ('nopassword', 'hashed_password', 'configured_password')]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    # 'password' is the provider/connection credential, not a user password.
    if module.params['password'] and not module.params['configured_password']:
        warnings.append(
            'The "password" argument is used to authenticate the current connection. ' +
            'To set a user password use "configured_password" instead.'
        )
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Diff desired users (params) against the device's running config.
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(update_objects(want, have), module)
    # purge=True removes every configured user not named here, except 'admin'.
    if module.params['purge']:
        want_users = [x['name'] for x in want]
        have_users = [x['name'] for x in have]
        for item in set(have_users).difference(want_users):
            if item != 'admin':
                commands.append(user_del_cmd(item))
    result['commands'] = commands
    if commands:
        # Only touch the device outside check mode.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| 31.847426 | 110 | 0.633709 |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_user
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage the aggregate of local users on Cisco IOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
notes:
- Tested against IOS 15.6
options:
aggregate:
description:
- The set of username objects to be configured on the remote
Cisco IOS device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the Cisco IOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
configured_password:
description:
- The password to be configured on the Cisco IOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
password_type:
description:
- This argument determines whether a 'password' or 'secret' will be
configured.
default: secret
choices: ['secret', 'password']
version_added: "2.8"
hashed_password:
description:
- This option allows configuring hashed passwords on Cisco IOS devices.
suboptions:
type:
description:
- Specifies the type of hash (e.g., 5 for MD5, 8 for PBKDF2, etc.)
- For this to work, the device needs to support the desired hash type
type: int
required: True
value:
description:
- The actual hashed password to be configured on the device
required: True
version_added: "2.8"
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
view:
description:
- Configures the view for the username in the
device running configuration. The argument accepts a string value
defining the view name. This argument does not check if the view
has been configured on the device.
aliases: ['role']
sshkey:
description:
- Specifies one or more SSH public key(s) to configure
for the given username.
- This argument accepts a valid SSH key value.
version_added: "2.7"
nopassword:
description:
- Defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password.
type: bool
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: create a new user
ios_user:
name: ansible
nopassword: True
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: create a new user with multiple keys
ios_user:
name: ansible
sshkey:
- "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
- "{{ lookup('file', '~/path/to/public_key') }}"
state: present
- name: remove all users except admin
ios_user:
purge: yes
- name: remove all users except admin and these listed users
ios_user:
aggregate:
- name: testuser1
- name: testuser2
- name: testuser3
purge: yes
- name: set multiple users to privilege level 15
ios_user:
aggregate:
- name: netop
- name: netend
privilege: 15
state: present
- name: set user view/role
ios_user:
name: netop
view: network-operator
state: present
- name: Change Password for User netop
ios_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
- name: Aggregate of users
ios_user:
aggregate:
- name: ansibletest2
- name: ansibletest3
view: network-admin
- name: Add a user specifying password type
ios_user:
name: ansibletest4
configured_password: "{{ new_password }}"
password_type: password
- name: Add a user with MD5 hashed password
ios_user:
name: ansibletest5
hashed_password:
type: 5
value: $3$8JcDilcYgFZi.yz4ApaqkHG2.8/
- name: Delete users with aggregate
ios_user:
aggregate:
- name: ansibletest1
- name: ansibletest2
- name: ansibletest3
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
"""
import base64
import hashlib
import re
from copy import deepcopy
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
from ansible.module_utils.six import iteritems
def validate_privilege(value, module):
    """Fail the module run when *value* is a privilege level outside 1..15.

    A falsy value (``None``/0) is accepted without validation.
    """
    if not value:
        return
    if value < 1 or value > 15:
        module.fail_json(msg='privilege must be between 1 and 15, got %s' % value)
def user_del_cmd(username):
    """Build the interactive CLI command dict that removes *username*."""
    cmd = dict()
    cmd['command'] = 'no username %s' % username
    # IOS asks for confirmation before removing a user; answer it for the caller.
    cmd['prompt'] = 'This operation will remove all username related configurations with same name'
    cmd['answer'] = 'y'
    cmd['newline'] = False
    return cmd
def sshkey_fingerprint(sshkey):
    """Return the MD5-fingerprint form of an SSH public key.

    A bare base64 key gets an ``ssh-rsa`` prefix; a full ``<type> <key> ...``
    line has only the key field replaced by its fingerprint.
    """
    if not sshkey:
        return None

    def _digest(encoded):
        return hashlib.md5(base64.b64decode(encoded)).hexdigest().upper()

    if ' ' not in sshkey:
        return 'ssh-rsa %s' % _digest(sshkey)
    parts = sshkey.split(' ')
    parts[1] = _digest(parts[1])
    return ' '.join(parts)
def map_obj_to_commands(updates, module):
    """Translate (want, have) user pairs into IOS configuration commands.

    ``updates`` is a list of ``(desired, current)`` dict pairs as produced by
    ``update_objects``; the return value is the ordered list of CLI commands
    needed to move the device from ``have`` towards ``want``.
    """
    commands = list()
    update_password = module.params['update_password']
    password_type = module.params['password_type']
    def needs_update(want, have, x):
        # A field needs pushing only when it is set and differs from the device.
        return want.get(x) and (want.get(x) != have.get(x))
    def add(command, want, x):
        command.append('username %s %s' % (want['name'], x))
    def add_hashed_password(command, want, x):
        command.append('username %s secret %s %s' % (want['name'], x.get('type'),
                                                     x.get('value')))
    def add_ssh(command, want, x=None):
        # SSH public keys live under the 'ip ssh pubkey-chain' sub-mode.
        command.append('ip ssh pubkey-chain')
        if x:
            command.append('username %s' % want['name'])
            for item in x:
                command.append('key-hash %s' % item)
            command.append('exit')
        else:
            command.append('no username %s' % want['name'])
        command.append('exit')
    for update in updates:
        want, have = update
        if want['state'] == 'absent':
            if have['sshkey']:
                add_ssh(commands, want)
            else:
                commands.append(user_del_cmd(want['name']))
        if needs_update(want, have, 'view'):
            add(commands, want, 'view %s' % want['view'])
        if needs_update(want, have, 'privilege'):
            add(commands, want, 'privilege %s' % want['privilege'])
        if needs_update(want, have, 'sshkey'):
            add_ssh(commands, want, want['sshkey'])
        if needs_update(want, have, 'configured_password'):
            # 'on_create' only sets passwords for brand-new users (empty have).
            if update_password == 'always' or not have:
                if have and password_type != have['password_type']:
                    module.fail_json(msg='Can not have both a user password and a user secret.' +
                                     ' Please choose one or the other.')
                add(commands, want, '%s %s' % (password_type, want['configured_password']))
        if needs_update(want, have, 'hashed_password'):
            add_hashed_password(commands, want, want['hashed_password'])
        if needs_update(want, have, 'nopassword'):
            if want['nopassword']:
                add(commands, want, 'nopassword')
            else:
                # NOTE(review): user_del_cmd() returns a dict, so add() renders the
                # dict straight into the 'username ...' string here — looks wrong;
                # confirm intended behavior against upstream ios_user.
                add(commands, want, user_del_cmd(want['name']))
    return commands
def parse_view(data):
    """Extract the parser view name from a username config line, or None."""
    found = re.search(r'view (\S+)', data, re.M)
    return found.group(1) if found else None
def parse_sshkey(data, user):
    """Return the list of key-hash entries configured for *user* (may be empty)."""
    block = re.search(r'username %s(\n\s+key-hash .+$)+' % user, data, re.M)
    if block is None:
        return []
    # Each matched line looks like 'key-hash <algo> <hash> [comment]'.
    return re.findall(r'key-hash (\S+ \S+(?: .+)?)$', block.group(), re.M)
def parse_privilege(data):
    """Extract the privilege level from a config line as an int, or None."""
    found = re.search(r'privilege (\S+)', data, re.M)
    if found is None:
        return None
    return int(found.group(1))
def parse_password_type(data):
    """Return 'password' or 'secret' when it is the third-from-last token, else None.

    Matches config lines of the form 'username <name> secret <type> <hash>'.
    """
    if not data:
        return None
    token = data.split()[-3]
    return token if token in ('password', 'secret') else None
def map_config_to_obj(module):
    """Parse the device's running configuration into a list of user dicts."""
    data = get_config(module, flags=['| section username'])
    # Match 'username' at line start, or indented two spaces (the form used
    # inside the 'ip ssh pubkey-chain' sub-section).
    match = re.findall(r'(?:^(?:u|\s{2}u))sername (\S+)', data, re.M)
    if not match:
        return list()
    instances = list()
    for user in set(match):
        # Collect every top-level 'username <name> ...' line for this user.
        regex = r'username %s .+$' % user
        cfg = re.findall(regex, data, re.M)
        cfg = '\n'.join(cfg)
        obj = {
            'name': user,
            'state': 'present',
            'nopassword': 'nopassword' in cfg,
            # Passwords can't be read back from the device.
            'configured_password': None,
            'hashed_password': None,
            'password_type': parse_password_type(cfg),
            'sshkey': parse_sshkey(data, user),
            'privilege': parse_privilege(cfg),
            'view': parse_view(cfg)
        }
        instances.append(obj)
    return instances
def get_param_value(key, item, module):
    """Resolve *key* for an aggregate *item*, falling back to module params.

    A value supplied on the item itself is type-checked against the module's
    argument spec; the resolved value is then run through a matching
    ``validate_<key>`` function from this module, when one exists.
    """
    if not item.get(key):
        value = module.params[key]
    # if key does exist, do a type check on it to validate it
    else:
        value_type = module.argument_spec[key].get('type', 'str')
        type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
        type_checker(item[key])
        value = item[key]
    # validate the param value (if validator func exists)
    validator = globals().get('validate_%s' % key)
    if all((value, validator)):
        validator(value, module)
    return value
def map_params_to_obj(module):
    """Normalize module parameters into a list of desired-user dicts.

    Handles both the single ``name`` form and the ``aggregate`` list form
    (whose entries may be bare names or dicts), then fills every per-user
    field via ``get_param_value``.
    """
    users = module.params['aggregate']
    if not users:
        # purge with no name means "remove everything except admin" — no want list
        if not module.params['name'] and module.params['purge']:
            return list()
        elif not module.params['name']:
            module.fail_json(msg='username is required')
        else:
            aggregate = [{'name': module.params['name']}]
    else:
        aggregate = list()
        for item in users:
            if not isinstance(item, dict):
                aggregate.append({'name': item})
            elif 'name' not in item:
                module.fail_json(msg='name is required')
            else:
                aggregate.append(item)
    objects = list()
    for item in aggregate:
        get_value = partial(get_param_value, item=item, module=module)
        item['configured_password'] = get_value('configured_password')
        item['hashed_password'] = get_value('hashed_password')
        item['nopassword'] = get_value('nopassword')
        item['privilege'] = get_value('privilege')
        item['view'] = get_value('view')
        # SSH keys are stored/compared in their MD5-fingerprint form.
        item['sshkey'] = render_key_list(get_value('sshkey'))
        item['state'] = get_value('state')
        objects.append(item)
    return objects
def render_key_list(ssh_keys):
    """Fingerprint every key in *ssh_keys*; a None/empty input yields []."""
    if not ssh_keys:
        return []
    return [sshkey_fingerprint(key) for key in ssh_keys]
def update_objects(want, have):
    """Pair each desired user with its current device state when a change is needed.

    New users are paired with an empty dict; existing users are paired with
    their current config once per differing, truthy field.
    """
    updates = list()
    for desired in want:
        current = next((h for h in have if h['name'] == desired['name']), None)
        if current is None and desired['state'] == 'present':
            # brand-new user: diff against an empty configuration
            updates.append((desired, {}))
        elif current:
            for key, value in iteritems(desired):
                if value and value != current[key]:
                    updates.append((desired, current))
    return updates
def main():
    """Entry point: build the argument spec, diff desired vs. device users,
    and push the resulting commands (honoring check mode)."""
    hashed_password_spec = dict(
        type=dict(type='int', required=True),
        value=dict(no_log=True, required=True)
    )
    # Fields accepted both at top level and per-entry inside 'aggregate'.
    element_spec = dict(
        name=dict(),
        configured_password=dict(no_log=True),
        hashed_password=dict(no_log=True, type='dict', options=hashed_password_spec),
        nopassword=dict(type='bool'),
        update_password=dict(default='always', choices=['on_create', 'always']),
        password_type=dict(default='secret', choices=['secret', 'password']),
        privilege=dict(type='int'),
        view=dict(aliases=['role']),
        sshkey=dict(type='list'),
        state=dict(default='present', choices=['present', 'absent'])
    )
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection']),
        purge=dict(type='bool', default=False)
    )
    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)
    mutually_exclusive = [('name', 'aggregate'), ('nopassword', 'hashed_password', 'configured_password')]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    # 'password' (from ios_argument_spec) authenticates the connection and is a
    # common source of confusion with 'configured_password'.
    if module.params['password'] and not module.params['configured_password']:
        warnings.append(
            'The "password" argument is used to authenticate the current connection. ' +
            'To set a user password use "configured_password" instead.'
        )
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(update_objects(want, have), module)
    if module.params['purge']:
        # Remove every configured user not in 'want', except the admin account.
        want_users = [x['name'] for x in want]
        have_users = [x['name'] for x in have]
        for item in set(have_users).difference(want_users):
            if item != 'admin':
                commands.append(user_del_cmd(item))
    result['commands'] = commands
    if commands:
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    module.exit_json(**result)
# Module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| true | true |
f724d87fd763688168a55ea7c5a6817849d45718 | 125 | py | Python | src/hist/numpy.py | andrzejnovak/hist | 15a41565ac9a3683bff74b98803c4b88ad8a19ae | [
"BSD-3-Clause"
] | null | null | null | src/hist/numpy.py | andrzejnovak/hist | 15a41565ac9a3683bff74b98803c4b88ad8a19ae | [
"BSD-3-Clause"
] | null | null | null | src/hist/numpy.py | andrzejnovak/hist | 15a41565ac9a3683bff74b98803c4b88ad8a19ae | [
"BSD-3-Clause"
] | null | null | null | from boost_histogram.numpy import histogram, histogram2d, histogramdd
__all__ = ("histogram", "histogram2d", "histogramdd")
| 31.25 | 69 | 0.792 | from boost_histogram.numpy import histogram, histogram2d, histogramdd
__all__ = ("histogram", "histogram2d", "histogramdd")
| true | true |
f724d8ab5bf6fadc70a44f28e9bffcf70edecf16 | 1,732 | py | Python | tools/randomData.py | Tandelajr/mr.tandela | 096cce682de58f2a7035d3e114787a78a1015a9b | [
"MIT"
] | 3 | 2020-06-23T11:59:14.000Z | 2020-12-03T15:20:18.000Z | tools/randomData.py | Tandelajr/mr.tandela | 096cce682de58f2a7035d3e114787a78a1015a9b | [
"MIT"
] | 1 | 2020-06-23T12:01:41.000Z | 2020-06-23T12:01:41.000Z | tools/randomData.py | Tandelajr/mr.tandela | 096cce682de58f2a7035d3e114787a78a1015a9b | [
"MIT"
] | 1 | 2020-12-03T15:20:26.000Z | 2020-12-03T15:20:26.000Z | #!/usr/bin/env https://github.com/Tandelajr/mr.tandela
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import random
# Get random IP
def random_IP():
    """Return a random dotted-quad IPv4 address; each octet is drawn from 1..255."""
    octets = [str(random.randint(1, 255)) for _ in range(4)]
    return ".".join(octets)
# Get random referer
def random_referer():
    """Pick one line at random from the bundled referers list."""
    with open("tools/other/referers.txt", 'r') as handle:
        lines = handle.readlines()
    return random.choice(lines)
# Get random user agent
def random_useragent():
    """Pick one entry at random from the bundled user-agent JSON list."""
    with open("tools/other/user_agents.json", 'r') as handle:
        agents = json.load(handle)["agents"]
    return random.choice(agents)
| 37.652174 | 80 | 0.741917 |
import json
import random
def random_IP():
ip = []
for _ in range(0, 4):
ip.append(str(random.randint(1,255)))
return ".".join(ip)
def random_referer():
with open("tools/other/referers.txt", 'r') as referers:
referers = referers.readlines()
return random.choice(referers)
def random_useragent():
with open("tools/other/user_agents.json", 'r') as agents:
user_agents = json.load(agents)["agents"]
return random.choice(user_agents)
| true | true |
f724d950f3f0f4ab4df0111f810aec962a3b5e21 | 149,021 | py | Python | scipy/stats/stats.py | Dapid/scipy | dde07a64407ffaa9442b3d8298c6c26ff91fb384 | [
"BSD-3-Clause"
] | null | null | null | scipy/stats/stats.py | Dapid/scipy | dde07a64407ffaa9442b3d8298c6c26ff91fb384 | [
"BSD-3-Clause"
] | null | null | null | scipy/stats/stats.py | Dapid/scipy | dde07a64407ffaa9442b3d8298c6c26ff91fb384 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import futil
from . import distributions
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
return a, b, outaxis
def find_repeats(arr):
    """
    Find repeats and repeat counts.
    Parameters
    ----------
    arr : array_like
        Input array
    Returns
    -------
    find_repeats : tuple
        Returns a tuple of two 1-D ndarrays. The first ndarray are the repeats
        as sorted, unique values that are repeated in `arr`. The second
        ndarray are the counts mapped one-to-one of the repeated values
        in the first ndarray.
    Examples
    --------
    >>> from scipy import stats
    >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
    (array([ 2.]), array([4], dtype=int32))
    >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
    (array([ 4.,  5.]), array([2, 2], dtype=int32))
    """
    # dfreps is a Fortran helper; it returns fixed-size work arrays of repeated
    # values and counts, plus n, the number of valid leading entries.
    v1, v2, n = futil.dfreps(arr)
    return v1[:n], v2[:n]
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
                      "in favour of numpy.nanmean.")
def nanmean(x, axis=0):
    """
    Compute the mean over the given axis ignoring nans.
    Parameters
    ----------
    x : ndarray
        Input array.
    axis : int or None, optional
        Axis along which the mean is computed. Default is 0.
        If None, compute over the whole array `x`.
    Returns
    -------
    m : float
        The mean of `x`, ignoring nans.
    See Also
    --------
    nanstd, nanmedian
    Examples
    --------
    >>> from scipy import stats
    >>> a = np.linspace(0, 4, 3)
    >>> a
    array([ 0., 2., 4.])
    >>> a[-1] = np.nan
    >>> stats.nanmean(a)
    1.0
    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    Norig = x.shape[axis]
    mask = np.isnan(x)
    # fraction of non-NaN entries along the axis; used to rescale the mean
    factor = 1.0 - np.sum(mask, axis) / Norig
    x[mask] = 0.0
    # mean over the zero-filled data divided by the kept fraction
    # equals the mean over the non-NaN entries only
    return np.mean(x, axis) / factor
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
                      "in favour of numpy.nanstd.\nNote that numpy.nanstd "
                      "has a different signature.")
def nanstd(x, axis=0, bias=False):
    """
    Compute the standard deviation over the given axis, ignoring nans.
    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the standard deviation is computed. Default is 0.
        If None, compute over the whole array `x`.
    bias : bool, optional
        If True, the biased (normalized by N) definition is used. If False
        (default), the unbiased definition is used.
    Returns
    -------
    s : float
        The standard deviation.
    See Also
    --------
    nanmean, nanmedian
    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10, dtype=float)
    >>> a[1:3] = np.nan
    >>> np.std(a)
    nan
    >>> stats.nanstd(a)
    2.9154759474226504
    >>> stats.nanstd(a.reshape(2, 5), axis=1)
    array([ 2.0817, 1.5811])
    >>> stats.nanstd(a.reshape(2, 5), axis=None)
    2.9154759474226504
    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    Norig = x.shape[axis]
    mask = np.isnan(x)
    Nnan = np.sum(mask, axis) * 1.0
    # effective sample size per slice after dropping NaNs
    n = Norig - Nnan
    x[mask] = 0.0
    m1 = np.sum(x, axis) / n
    if axis:
        d = x - np.expand_dims(m1, axis)
    else:
        d = x - m1
    d *= d
    # NaNs were zero-filled, so each one contributed (0 - m1)**2 = m1**2 to the
    # squared deviations; subtract that spurious contribution here.
    m2 = np.sum(d, axis) - m1 * m1 * Nnan
    if bias:
        m2c = m2 / n
    else:
        m2c = m2 / (n - 1.0)
    return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Results
-------
m : float
The median.
"""
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
# select non-nans at end of array
enonan = x[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
                      "in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
    """
    Compute the median along the given axis ignoring nan values.
    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the median is computed. Default is 0.
        If None, compute over the whole array `x`.
    Returns
    -------
    m : float
        The median of `x` along `axis`.
    See Also
    --------
    nanstd, nanmean, numpy.nanmedian
    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 3, 1, 5, 5, np.nan])
    >>> stats.nanmedian(a)
    array(3.0)
    >>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
    >>> stats.nanmedian(b)
    array(4.0)
    Example with axis:
    >>> c = np.arange(30.).reshape(5,6)
    >>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
    >>> c[idx] = np.nan
    >>> c
    array([[  0.,   1.,   2.,  nan,   4.,   5.],
           [  6.,   7.,  nan,   9.,  10.,  11.],
           [ 12.,  nan,  14.,  15.,  16.,  17.],
           [ nan,  19.,  20.,  21.,  22.,  nan],
           [ 24.,  25.,  26.,  27.,  nan,  29.]])
    >>> stats.nanmedian(c, axis=1)
    array([  2. ,   9. ,  15. ,  20.5,  26. ])
    """
    x, axis = _chk_asarray(x, axis)
    if x.ndim == 0:
        return float(x.item())
    if hasattr(np, 'nanmedian'):  # numpy 1.9 faster for some cases
        return np.nanmedian(x, axis)
    # fall back to applying the 1-D helper slice by slice
    x = np.apply_along_axis(_nanmedian, axis, x)
    if x.ndim == 0:
        x = float(x.item())
    return x
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
    """
    Compute the geometric mean along the specified axis.

    Returns the n-th root of ``(x1 * x2 * ... * xn)``, evaluated as
    ``exp(mean(log(a)))``.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the geometric mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. When not given, the dtype of `a` is kept
        (integer inputs are promoted to float64 by the log).

    Returns
    -------
    gmean : ndarray
        The geometric mean along `axis`; see `dtype` above.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    hmean : Harmonic mean

    Notes
    -----
    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations, since masked arrays automatically mask
    non-finite values.
    """
    if isinstance(a, np.ndarray):
        if dtype:
            # re-view with the requested dtype, preserving any mask
            caster = np.ma.asarray if isinstance(a, np.ma.MaskedArray) else np.asarray
            log_a = np.log(caster(a, dtype=dtype))
        else:
            log_a = np.log(a)
    else:
        # plain sequences: convert (honouring dtype) before taking logs
        log_a = np.log(np.array(a, dtype=dtype))
    return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
    """
    Calculates the harmonic mean along the specified axis.

    That is: ``n / (1/x1 + 1/x2 + ... + 1/xn)``.

    Parameters
    ----------
    a : array_like
        Input array, masked array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the harmonic mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. When not given, the dtype of `a` is kept.

    Returns
    -------
    hmean : ndarray
        The harmonic mean along `axis`; see `dtype` above.

    Raises
    ------
    ValueError
        If any element of `a` is not strictly positive.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    gmean : Geometric mean
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)
    # Harmonic mean only defined if greater than zero
    if not np.all(a > 0):
        raise ValueError("Harmonic mean only defined if all elements greater than zero")
    if isinstance(a, np.ma.MaskedArray):
        size = a.count(axis)
    elif axis is None:
        a = a.ravel()
        size = a.shape[0]
    else:
        size = a.shape[axis]
    return size / np.sum(1.0/a, axis=axis, dtype=dtype)
def mode(a, axis=0):
    """
    Returns an array of the modal (most common) value in the passed array.
    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.
    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    Returns
    -------
    vals : ndarray
        Array of modal values.
    counts : ndarray
        Array of counts for each mode.
    Examples
    --------
    >>> a = np.array([[6, 8, 3, 0],
                      [3, 2, 1, 7],
                      [8, 1, 8, 4],
                      [5, 3, 0, 5],
                      [4, 7, 5, 9]])
    >>> from scipy import stats
    >>> stats.mode(a)
    (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
    To get mode of whole array, specify axis=None:
    >>> stats.mode(a, axis=None)
    (array([3]), array([3]))
    """
    a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        return np.array([]), np.array([])
    scores = np.unique(np.ravel(a))  # get ALL unique values
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape, dtype=a.dtype)
    oldcounts = np.zeros(testshape, dtype=int)
    for score in scores:
        template = (a == score)
        counts = np.expand_dims(np.sum(template, axis), axis)
        # strict '>' plus ascending 'scores' means ties keep the smallest value
        mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
        oldmostfreq = mostfrequent
    return mostfrequent, oldcounts
def mask_to_limits(a, limits, inclusive):
    """Mask an array for values outside of given limits.

    This is primarily a utility function.

    Parameters
    ----------
    a : array
    limits : (float or None, float or None)
        A tuple consisting of the (lower limit, upper limit). Values in the
        input array below the lower limit or above the upper limit are masked
        out. None implies no limit on that side.
    inclusive : (bool, bool)
        A tuple consisting of the (lower flag, upper flag). When a flag is
        True, values exactly equal to that limit are kept.

    Returns
    -------
    A MaskedArray.

    Raises
    ------
    ValueError
        If there are no values within the given limits.
    """
    lower, upper = limits
    lower_keep_equal, upper_keep_equal = inclusive
    masked = ma.MaskedArray(a)
    if lower is not None:
        mask_fn = ma.masked_less if lower_keep_equal else ma.masked_less_equal
        masked = mask_fn(masked, lower)
    if upper is not None:
        mask_fn = ma.masked_greater if upper_keep_equal else ma.masked_greater_equal
        masked = mask_fn(masked, upper)
    if masked.count() == 0:
        raise ValueError("No array values within given limits")
    return masked
def tmean(a, limits=None, inclusive=(True, True)):
    """
    Compute the trimmed mean.

    The arithmetic mean of the values in `a`, ignoring values outside the
    given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values below the lower limit or above the upper limit are ignored.
        None (the default) uses all values; either element of the tuple may
        also be None for a half-open interval.
    inclusive : (bool, bool), optional
        (lower flag, upper flag): whether values exactly equal to the lower
        or upper limits are included. Default is (True, True).

    Returns
    -------
    tmean : float
    """
    a = asarray(a)
    if limits is None:
        return np.mean(a, None)
    trimmed = mask_to_limits(a.ravel(), limits, inclusive)
    return trimmed.mean()
def masked_var(am):
    """Unbiased (ddof=1) variance of a masked array's unmasked entries."""
    resid = am - am.mean()
    return ma.add.reduce(resid * resid) / (am.count() - 1.0)
def tvar(a, limits=None, inclusive=(True, True)):
    """
    Compute the trimmed variance.

    The sample variance of the values in `a`, ignoring values outside the
    given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values below the lower limit or above the upper limit are ignored.
        None (the default) uses all values; either element of the tuple may
        also be None for a half-open interval.
    inclusive : (bool, bool), optional
        (lower flag, upper flag): whether values exactly equal to the lower
        or upper limits are included. Default is (True, True).

    Returns
    -------
    tvar : float
        Trimmed variance.

    Notes
    -----
    `tvar` computes the unbiased sample variance, i.e. it uses a correction
    factor ``n / (n - 1)``.
    """
    vals = asarray(a).astype(float).ravel()
    if limits is None:
        n = len(vals)
        # rescale the population variance to the unbiased sample variance
        return vals.var() * n / (n - 1.)
    return masked_var(mask_to_limits(vals, limits, inclusive))
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
    """
    Compute the trimmed minimum
    This function finds the miminum value of an array `a` along the
    specified axis, but only considering values greater than a specified
    lower limit.
    Parameters
    ----------
    a : array_like
        array of values
    lowerlimit : None or float, optional
        Values in the input array less than the given limit will be ignored.
        When lowerlimit is None, then all values are used. The default value
        is None.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over the whole
        array `a`.
    inclusive : {True, False}, optional
        This flag determines whether values exactly equal to the lower limit
        are included. The default value is True.
    Returns
    -------
    tmin : float
    """
    a, axis = _chk_asarray(a, axis)
    # only a lower limit applies; when inclusive, values == lowerlimit are kept
    am = mask_to_limits(a, (lowerlimit, None), (inclusive, False))
    return ma.minimum.reduce(am, axis)
def tmax(a, upperlimit=None, axis=0, inclusive=True):
    """
    Compute the trimmed maximum
    This function computes the maximum value of an array along a given axis,
    while ignoring values larger than a specified upper limit.
    Parameters
    ----------
    a : array_like
        array of values
    upperlimit : None or float, optional
        Values in the input array greater than the given limit will be ignored.
        When upperlimit is None, then all values are used. The default value
        is None.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over the
        whole array `a`.
    inclusive : {True, False}, optional
        This flag determines whether values exactly equal to the upper limit
        are included. The default value is True.
    Returns
    -------
    tmax : float
    """
    a, axis = _chk_asarray(a, axis)
    # only an upper limit applies; when inclusive, values == upperlimit are kept
    am = mask_to_limits(a, (None, upperlimit), (False, inclusive))
    return ma.maximum.reduce(am, axis)
def tstd(a, limits=None, inclusive=(True, True)):
    """
    Compute the trimmed sample standard deviation
    This function finds the sample standard deviation of given values,
    ignoring values outside the given `limits`.
    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values in the input array less than the lower limit or greater than the
        upper limit will be ignored. When limits is None, then all values are
        used. Either of the limit values in the tuple can also be None
        representing a half-open interval. The default value is None.
    inclusive : (bool, bool), optional
        A tuple consisting of the (lower flag, upper flag). These flags
        determine whether values exactly equal to the lower or upper limits
        are included. The default value is (True, True).
    Returns
    -------
    tstd : float
    Notes
    -----
    `tstd` computes the unbiased sample standard deviation, i.e. it uses a
    correction factor ``n / (n - 1)``.
    """
    # the square root of the unbiased trimmed variance
    return np.sqrt(tvar(a, limits, inclusive))
def tsem(a, limits=None, inclusive=(True, True)):
    """
    Compute the trimmed standard error of the mean.

    Finds the standard error of the mean of the values in `a`, ignoring
    any value outside `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values below the lower limit or above the upper limit are
        ignored. When None (the default) all values are used. Either
        entry of the tuple may itself be None, giving a half-open
        interval.
    inclusive : (bool, bool), optional
        (lower flag, upper flag): whether values exactly equal to the
        lower/upper limit are kept. Default is (True, True).

    Returns
    -------
    tsem : float

    Notes
    -----
    `tsem` uses the unbiased sample standard deviation, i.e. the
    ``n / (n - 1)`` correction factor.
    """
    values = np.asarray(a).ravel()
    if limits is None:
        # No trimming requested: plain unbiased standard error.
        return values.std(ddof=1) / np.sqrt(values.size)
    masked = mask_to_limits(values, limits, inclusive)
    n_kept = masked.count()
    return np.sqrt(masked_var(masked)) / np.sqrt(n_kept)
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0):
    """
    Calculates the nth moment about the mean for a sample.

    Generally used to calculate coefficients of skewness and
    kurtosis.

    Parameters
    ----------
    a : array_like
        data
    moment : int, optional
        order of central moment that is returned
    axis : int or None, optional
        Axis along which the central moment is computed. Default is 0.
        If None, compute over the whole array `a`.

    Returns
    -------
    n-th central moment : ndarray or float
        The appropriate moment along the given axis or over all values if axis
        is None. The denominator for the moment calculation is the number of
        observations, no degrees of freedom correction is done.
    """
    a, axis = _chk_asarray(a, axis)
    if moment == 1:
        # By definition the first moment about the mean is 0.
        shape = list(a.shape)
        del shape[axis]
        if shape:
            # return an actual array of the appropriate shape
            return np.zeros(shape, dtype=float)
        else:
            # the input was 1D, so return a scalar instead of a rank-0 array
            return np.float64(0.0)
    else:
        # Exponentiation by squares: form exponent sequence.  n_list records
        # the chain of exponents (moment, moment//2, ...) down to 1 or 2 so
        # the power s**moment can be built with O(log(moment)) squarings.
        n_list = [moment]
        current_n = moment
        while current_n > 2:
            if current_n % 2:
                current_n = (current_n-1)/2
            else:
                current_n /= 2
            n_list.append(current_n)
        # Starting point for exponentiation by squares: the centered data
        # raised to the smallest exponent in the chain (1 or 2).
        a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
        if n_list[-1] == 1:
            s = a_zero_mean.copy()
        else:
            s = a_zero_mean**2
        # Perform multiplications: walk the exponent chain back up, squaring
        # at each step and multiplying in one extra factor for odd exponents.
        for n in n_list[-2::-1]:
            s = s**2
            if n % 2:
                s *= a_zero_mean
        # Average of the centered powers = the central moment.
        return np.mean(s, axis)
def variation(a, axis=0):
    """
    Computes the coefficient of variation, the ratio of the biased standard
    deviation to the mean.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate the coefficient of variation. Default
        is 0. If None, compute over the whole array `a`.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    a, axis = _chk_asarray(a, axis)
    std_dev = a.std(axis)
    mean_val = a.mean(axis)
    return std_dev / mean_val
def skew(a, axis=0, bias=True):
    """
    Computes the skewness of a data set.

    For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
    distribution. The function `skewtest` can be used to determine if the
    skewness value is close enough to 0, statistically speaking.

    Parameters
    ----------
    a : ndarray
        data
    axis : int or None, optional
        Axis along which skewness is calculated. Default is 0.
        If None, compute over the whole array `a`.
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.

    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all values are
        equal.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section 2.2.24.1
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    m2 = moment(a, 2, axis)  # biased sample variance
    m3 = moment(a, 3, axis)  # third central moment
    # g1 = m3 / m2**1.5; report 0 (not a division-by-zero result) where the
    # variance is exactly zero, i.e. all values along the axis are equal.
    zero = (m2 == 0)
    vals = np.where(zero, 0, m3 / m2**1.5)
    if not bias:
        # Bias correction G1 = sqrt(n*(n-1)) / (n-2) * g1, applied only
        # where it is defined: more than two observations and m2 > 0.
        can_correct = (n > 2) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
            np.place(vals, can_correct, nval)
    if vals.ndim == 0:
        # 0-d result: return a plain Python scalar for convenience.
        return vals.item()
    return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
    """
    Computes the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance. If Fisher's definition is used, then 3.0 is subtracted from
    the result to give 0.0 for a normal distribution.

    If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators

    Use `kurtosistest` to see if result is close enough to normal.

    Parameters
    ----------
    a : array
        data for which the kurtosis is calculated
    axis : int or None, optional
        Axis along which the kurtosis is calculated. Default is 0.
        If None, compute over the whole array `a`.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis. If all values are equal,
        return -3 for Fisher's definition and 0 for Pearson's definition.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    m2 = moment(a, 2, axis)  # biased sample variance
    m4 = moment(a, 4, axis)  # fourth central moment
    zero = (m2 == 0)
    # Silence the 0/0 warning for constant slices; `zero` selects 0 for
    # those entries anyway.
    olderr = np.seterr(all='ignore')
    try:
        vals = np.where(zero, 0, m4 / m2**2.0)
    finally:
        np.seterr(**olderr)
    if not bias:
        # Bias correction via k-statistics; only defined for n > 3 with
        # nonzero variance.
        can_correct = (n > 3) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m4 = np.extract(can_correct, m4)
            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
            # nval is Fisher-style (excess); store Pearson-style here and
            # subtract 3 again below if `fisher` is requested.
            np.place(vals, can_correct, nval + 3.0)
    if vals.ndim == 0:
        vals = vals.item()  # array scalar
    if fisher:
        return vals - 3
    else:
        return vals
# Result container returned by `describe`; field order matches the
# documented return values of that function.
_DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
                                                'variance', 'skewness',
                                                'kurtosis'))
def describe(a, axis=0, ddof=1):
    """
    Computes several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom used for the variance. Default is 1.

    Returns
    -------
    nobs : int
        Number of observations (length of data along `axis`).
    minmax : tuple of ndarrays or floats
        Minimum and maximum value of the data array.
    mean : ndarray or float
        Arithmetic mean of the data along axis.
    variance : ndarray or float
        Unbiased variance along axis (denominator ``nobs - ddof``).
    skewness : ndarray or float
        Biased skewness (no degrees-of-freedom correction).
    kurtosis : ndarray or float
        Biased Fisher kurtosis (normalized so the normal distribution
        gives zero; no bias correction).

    See Also
    --------
    skew, kurtosis
    """
    a, axis = _chk_asarray(a, axis)
    nobs = a.shape[axis]
    extrema = (np.min(a, axis=axis), np.max(a, axis=axis))
    mean_val = np.mean(a, axis=axis)
    variance = np.var(a, axis=axis, ddof=ddof)
    skewness = skew(a, axis)
    kurt = kurtosis(a, axis)
    # Return namedtuple for clarity
    return _DescribeResult(nobs, extrema, mean_val, variance, skewness, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def skewtest(a, axis=0):
    """
    Tests whether the skew is different from the normal distribution.

    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.

    Parameters
    ----------
    a : array
        The data to be tested
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.

    Returns
    -------
    z-score : float
        The computed z-score for this test.
    p-value : float
        a 2-sided p-value for the hypothesis test

    Notes
    -----
    The sample size must be at least 8.
    """
    a, axis = _chk_asarray(a, axis)
    if axis is None:
        a = np.ravel(a)
        axis = 0
    b2 = skew(a, axis)  # sample skewness statistic
    n = float(a.shape[axis])
    if n < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % int(n))
    # Transform the skewness into an (approximately) standard normal
    # deviate; the constants below depend only on the sample size n.
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
             ((n-2.0) * (n+5) * (n+7) * (n+9)))
    W2 = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    # Guard: replace exact zeros so the log/asinh-style transform below is
    # well-defined (log of 0 would be -inf).
    y = np.where(y == 0, 1, y)
    Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
    # Two-sided p-value from the standard normal survival function.
    return Z, 2 * distributions.norm.sf(np.abs(Z))
def kurtosistest(a, axis=0):
    """
    Tests whether a dataset has normal kurtosis

    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.

    Parameters
    ----------
    a : array
        array of the sample data
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.

    Returns
    -------
    z-score : float
        The computed z-score for this test.
    p-value : float
        The 2-sided p-value for the hypothesis test

    Notes
    -----
    Valid only for n>20. The Z-score is set to 0 for bad entries.
    """
    a, axis = _chk_asarray(a, axis)
    n = float(a.shape[axis])
    if n < 5:
        raise ValueError(
            "kurtosistest requires at least 5 observations; %i observations"
            " were given." % int(n))
    if n < 20:
        warnings.warn("kurtosistest only valid for n>=20 ... continuing "
                      "anyway, n=%i" % int(n))
    b2 = kurtosis(a, axis, fisher=False)  # Pearson kurtosis (normal -> 3)
    E = 3.0*(n-1) / (n+1)  # expected value of b2 under normality
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
    x = (b2-E) / np.sqrt(varb2)  # standardized kurtosis
    # Transform x toward normality; the constants depend only on n.
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
                                                        (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 - 2/(9.0*A)
    denom = 1 + x*np.sqrt(2/(A-4.0))
    # Bad entries (negative denominator) are replaced by the sentinel 99
    # and their Z-score zeroed out below.
    denom = np.where(denom < 0, 99, denom)
    # NOTE(review): after the clamp above, `denom < 0` can never be true
    # here, so the `term1` branch of this np.where is dead; bad entries are
    # handled solely via the 99 sentinel.
    term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))
    Z = np.where(denom == 99, 0, Z)
    if Z.ndim == 0:
        Z = Z[()]
    # zprob uses upper tail, so Z needs to be positive
    return Z, 2 * distributions.norm.sf(np.abs(Z))
def normaltest(a, axis=0):
    """
    Tests whether a sample differs from a normal distribution.

    Tests the null hypothesis that a sample comes from a normal
    distribution, using D'Agostino and Pearson's [1]_, [2]_ omnibus test
    that combines the skew and kurtosis z-scores.

    Parameters
    ----------
    a : array_like
        The array containing the data to be tested.
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.

    Returns
    -------
    k2 : float or array
        ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest`
        and ``k`` is the z-score returned by `kurtosistest`.
    p-value : float or array
        A 2-sided chi squared probability for the hypothesis test.

    References
    ----------
    .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
           moderate and large sample size," Biometrika, 58, 341-348
    .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
           departures from normality," Biometrika, 60, 613-622
    """
    a, axis = _chk_asarray(a, axis)
    z_skew, _ = skewtest(a, axis)
    z_kurt, _ = kurtosistest(a, axis)
    # Sum of two squared standard normal deviates ~ chi-squared with 2 df.
    statistic = z_skew * z_skew + z_kurt * z_kurt
    return statistic, chisqprob(statistic, 2)
def jarque_bera(x):
    """
    Perform the Jarque-Bera goodness of fit test on sample data.

    The Jarque-Bera test checks whether the sample's skewness and
    kurtosis match those of a normal distribution.

    Note that this test only works for a large enough number of data
    samples (>2000), as the test statistic is only asymptotically
    Chi-squared distributed with 2 degrees of freedom.

    Parameters
    ----------
    x : array_like
        Observations of a random variable.

    Returns
    -------
    jb_value : float
        The test statistic.
    p : float
        The p-value for the hypothesis test.

    References
    ----------
    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
           homoscedasticity and serial independence of regression residuals",
           6 Econometric Letters 255-259.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(987654321)
    >>> x = np.random.normal(0, 1, 100000)
    >>> stats.jarque_bera(x)
    (4.7165707989581342, 0.09458225503041906)
    """
    data = np.asarray(x)
    n = float(data.size)
    if n == 0:
        raise ValueError('At least one observation is required.')

    centered = data - data.mean()
    # Biased central moments (denominator n).
    m2 = np.sum(centered**2) / n
    m3 = np.sum(centered**3) / n
    m4 = np.sum(centered**4) / n
    skewness = m3 / m2**(3 / 2.)
    kurtosis = m4 / m2**2

    jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
    # Asymptotically chi-squared with 2 degrees of freedom.
    p = 1 - distributions.chi2.cdf(jb_value, 2)
    return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
    """
    Returns a 2-D array of item frequencies.

    Parameters
    ----------
    a : (N,) array_like
        Input array.

    Returns
    -------
    itemfreq : (K, 2) ndarray
        A 2-D frequency table. Column 1 contains sorted, unique values from
        `a`, column 2 contains their respective counts.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
    >>> stats.itemfreq(a)
    array([[ 0.,  2.],
           [ 1.,  4.],
           [ 2.,  2.],
           [ 4.,  1.],
           [ 5.,  1.]])
    """
    # `inverse` maps every element of `a` onto the index of its unique
    # value, so bincount of it yields the per-value counts directly.
    unique_vals, inverse = np.unique(a, return_inverse=True)
    counts = np.bincount(inverse)
    return np.array([unique_vals, counts]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
                      axis=None):
    """
    Calculate the score at a given percentile of the input sequence.

    For example, the score at `per=50` is the median. If the desired quantile
    lies between two data points, we interpolate between them, according to
    the value of `interpolation_method`. If the parameter `limit` is
    provided, it should be a tuple (lower, upper) of two values.

    Parameters
    ----------
    a : array_like
        A 1-D array of values from which to extract score.
    per : array_like
        Percentile(s) at which to extract score. Values should be in range
        [0,100].
    limit : tuple, optional
        Tuple of two scalars, the lower and upper limits within which to
        compute the percentile. Values of `a` outside
        this (closed) interval will be ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`

        - fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
          fractional part of the index surrounded by ``i`` and ``j``.
        - lower: ``i``.
        - higher: ``j``.

    axis : int, optional
        Axis along which the percentiles are computed. Default is None. If
        None, compute over the whole array `a`.

    Returns
    -------
    score : float or ndarray
        Score at percentile(s).

    See Also
    --------
    percentileofscore, numpy.percentile

    Notes
    -----
    This function will become obsolete in the future.
    For Numpy 1.9 and higher, `numpy.percentile` provides all the
    functionality that `scoreatpercentile` provides, and is significantly
    faster; prefer it when numpy >= 1.9 is available.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    # adapted from NumPy's percentile function. When we require numpy >= 1.8,
    # the implementation of this function can be replaced by np.percentile.
    a = np.asarray(a)
    if a.size == 0:
        # empty array, return nan(s) with shape matching `per`
        if np.isscalar(per):
            return np.nan
        else:
            return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan

    if limit:
        a = a[(limit[0] <= a) & (a <= limit[1])]

    # FIX: the local previously shadowed the builtin `sorted`; use a
    # distinct name instead.
    sorted_a = np.sort(a, axis=axis)
    if axis is None:
        axis = 0

    return _compute_qth_percentile(sorted_a, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
    # Helper for `scoreatpercentile`: compute the `per`-th percentile of
    # already-sorted data along `axis`.  Recurses element-wise when `per`
    # is a sequence, so the caller only sorts once.
    # (The parameter name `sorted` shadows the builtin; kept for
    # backward compatibility of this private interface.)
    if not np.isscalar(per):
        score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
                 for i in per]
        return np.array(score)

    if (per < 0) or (per > 100):
        raise ValueError("percentile must be in the range [0, 100]")

    indexer = [slice(None)] * sorted.ndim
    idx = per / 100. * (sorted.shape[axis] - 1)

    if int(idx) != idx:
        # round fractional indices according to interpolation method
        if interpolation_method == 'lower':
            idx = int(np.floor(idx))
        elif interpolation_method == 'higher':
            idx = int(np.ceil(idx))
        elif interpolation_method == 'fraction':
            pass  # keep idx as fraction and interpolate
        else:
            raise ValueError("interpolation_method can only be 'fraction', "
                             "'lower' or 'higher'")

    i = int(idx)
    if i == idx:
        # Exact index: a single slice with unit weight.
        indexer[axis] = slice(i, i + 1)
        weights = array(1)
        sumval = 1.0
    else:
        # Fractional index: weight the two bracketing slices by distance.
        indexer[axis] = slice(i, i + 2)
        j = i + 1
        weights = array([(j - idx), (idx - i)], float)
        wshape = [1] * sorted.ndim
        wshape[axis] = 2
        weights.shape = wshape
        sumval = weights.sum()

    # FIX: index with a tuple — indexing an ndarray with a *list* of
    # slices is deprecated since NumPy 1.15 and removed in later releases.
    # Use np.add.reduce (== np.sum but a little faster) to coerce data type
    return np.add.reduce(sorted[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
    """
    The percentile rank of a score relative to a list of scores.

    A `percentileofscore` of, for example, 80% means that 80% of the
    scores in `a` are below the given score. In the case of gaps or
    ties, the exact definition depends on the optional keyword, `kind`.

    Parameters
    ----------
    a : array_like
        Array of scores to which `score` is compared.
    score : int or float
        Score that is compared to the elements in `a`.
    kind : {'rank', 'weak', 'strict', 'mean'}, optional
        This optional parameter specifies the interpretation of the
        resulting score:

        - "rank": Average percentage ranking of score.  In case of
                  multiple matches, average the percentage rankings of
                  all matching scores.
        - "weak": This kind corresponds to the definition of a cumulative
                  distribution function.  A percentileofscore of 80%
                  means that 80% of values are less than or equal
                  to the provided score.
        - "strict": Similar to "weak", except that only values that are
                    strictly less than the given score are counted.
        - "mean": The average of the "weak" and "strict" scores, often used in
                  testing.  See
                  http://en.wikipedia.org/wiki/Percentile_rank

    Returns
    -------
    pcos : float
        Percentile-position of score (0-100) relative to `a`.

    See Also
    --------
    numpy.percentile

    Examples
    --------
    Three-quarters of the given values lie below a given score:

    >>> from scipy import stats
    >>> stats.percentileofscore([1, 2, 3, 4], 3)
    75.0

    With multiple matches, note how the scores of the two matches, 0.6
    and 0.8 respectively, are averaged:

    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
    70.0

    Only 2/5 values are strictly less than 3:

    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
    40.0

    But 4/5 values are less than or equal to 3:

    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
    80.0

    The average between the weak and the strict scores is

    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
    60.0
    """
    a = np.array(a)
    n = len(a)

    if kind == 'rank':
        if not np.any(a == score):
            # Insert the score so it acquires a rank of its own; ranks are
            # then 0-based because the inserted element must not count.
            a = np.append(a, score)
            a_len = np.array(list(range(len(a))))
        else:
            a_len = np.array(list(range(len(a)))) + 1.0

        a = np.sort(a)
        # FIX: use the boolean mask directly.  The previous code wrapped it
        # in a list (``idx = [a == score]``), which relies on list-as-index
        # behavior deprecated by NumPy.
        idx = (a == score)
        pct = (np.mean(a_len[idx]) / n) * 100.0
        return pct

    elif kind == 'strict':
        return np.sum(a < score) / float(n) * 100
    elif kind == 'weak':
        return np.sum(a <= score) / float(n) * 100
    elif kind == 'mean':
        return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
    else:
        raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
def histogram2(a, bins):
    """
    Compute histogram using divisions in bins.

    Count how many values from `a` fall into each numerical range defined
    by `bins`: range x covers ``bins[x] <= v < bins[x+1]`` for
    ``x = 0..N-1`` (N = len(bins)), and the last range covers
    ``bins[N] <= v < infinity``. Values below ``bins[0]`` are excluded.

    Parameters
    ----------
    a : array_like of rank 1
        The array of values to be assigned into bins
    bins : array_like of rank 1
        Defines the ranges of values to use during histogramming.

    Returns
    -------
    histogram2 : ndarray of rank 1
        Each value represents the occurrences for a given bin (range) of
        values.
    """
    # comment: probably obsoleted by numpy.histogram()
    # Positions of each bin edge within the sorted data; adjacent
    # differences are then the per-bin counts.
    edge_positions = np.searchsorted(np.sort(a), bins)
    boundaries = np.concatenate([edge_positions, [len(a)]])
    return boundaries[1:] - boundaries[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
    """
    Separates the range into several bins and returns the number of instances
    in each bin.

    Parameters
    ----------
    a : array_like
        Array of scores which will be put into bins.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultlimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0
    printextras : bool, optional
        If True and there are points outside the bin limits, warn with
        the number of such points. Default is False.

    Returns
    -------
    histogram : ndarray
        Number of points (or sum of weights) in each bin.
    low_range : float
        Lowest value of histogram, the lower limit of the first bin.
    binsize : float
        The size of the bins (all bins have the same size).
    extrapoints : int
        The number of points outside the range of the histogram.

    See Also
    --------
    numpy.histogram

    Notes
    -----
    This histogram is based on numpy's histogram but has a larger range by
    default if default limits is not set.
    """
    a = np.ravel(a)
    if defaultlimits is None:
        # No range given: pad the data range so the extreme values fall
        # strictly inside the outermost bins.
        lo = a.min()
        hi = a.max()
        pad = (hi - lo) / (2. * (numbins - 1.))
        defaultlimits = (lo - pad, hi + pad)

    # Delegate the actual binning to numpy.
    hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
                                   weights=weights)
    # hist is not always float; convert to keep the historical output type.
    hist = np.array(hist, dtype=float)
    # numpy yields equal-width bins for an integer `bins`, so a single
    # width describes them all.
    binsize = bin_edges[1] - bin_edges[0]

    # Count the points falling outside the requested range.
    outside = (a < defaultlimits[0]) | (a > defaultlimits[1])
    extrapoints = int(np.count_nonzero(outside))
    if extrapoints > 0 and printextras:
        warnings.warn("Points outside given histogram range = %s"
                      % extrapoints)

    return hist, defaultlimits[0], binsize, extrapoints
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0

    Returns
    -------
    cumfreq : ndarray
        Binned values of cumulative frequency.
    lowerreallimit : float
        Lower real limit
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 4, 2, 1, 3, 1]
    >>> cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
    >>> cumfreqs
    array([ 3.,  4.,  5.,  6.])
    """
    counts, lowerlimit, binsize, extrapoints = histogram(
        a, numbins, defaultreallimits, weights=weights)
    # Running total of the per-bin counts gives the cumulative histogram.
    cumulative = np.cumsum(counts, axis=0)
    return cumulative, lowerlimit, binsize, extrapoints
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0

    Returns
    -------
    relfreq : ndarray
        Binned values of relative frequency.
    lowerreallimit : float
        Lower real limit
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([1, 4, 2, 1, 3, 1])
    >>> relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
    >>> relfreqs
    array([ 0.5       ,  0.16666667,  0.16666667,  0.16666667])
    """
    counts, lowerlimit, binsize, extrapoints = histogram(
        a, numbins, defaultreallimits, weights=weights)
    # Normalize by the total number of input observations (not by the
    # number of binned points) to match the historical behavior.
    total = float(np.array(a).shape[0])
    relative = np.array(counts / total)
    return relative, lowerlimit, binsize, extrapoints
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
    """
    Computes the O'Brien transform on input data (any number of arrays).

    Used to test for homogeneity of variance prior to running one-way stats.
    Each array in ``*args`` is one level of a factor.
    If `f_oneway` is run on the transformed data and found significant,
    the variances are unequal. From Maxwell and Delaney [1]_, p.112.

    Parameters
    ----------
    args : tuple of array_like
        Any number of arrays.

    Returns
    -------
    obrientransform : ndarray
        Transformed data for use in an ANOVA. The first dimension
        of the result corresponds to the sequence of transformed
        arrays. If the arrays given are all 1-D of the same length,
        the return value is a 2-D array; otherwise it is a 1-D array
        of type object, with each element being an ndarray.

    References
    ----------
    .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
           Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
    """
    TINY = np.sqrt(np.finfo(float).eps)

    # One transformed array per input argument.
    transformed = []
    for arg in args:
        data = np.asarray(arg)
        n = len(data)
        centered_sq = (data - np.mean(data))**2
        sumsq = centered_sq.sum()

        # The O'Brien transform.
        t = ((n - 1.5) * n * centered_sq - 0.5 * sumsq) / ((n - 1) * (n - 2))

        # Sanity check: the mean of the transformed data must reproduce
        # the sample variance of the original data.
        var = sumsq / (n - 1)
        if abs(var - np.mean(t)) > TINY:
            raise ValueError('Lack of convergence in obrientransform.')

        transformed.append(t)

    # With heterogeneous shapes, force dtype=object so np.array neither
    # raises (numpy 1.5.x) nor guesses (numpy 1.6+).
    if len(transformed) < 2 or all(x.shape == transformed[0].shape
                                   for x in transformed[1:]):
        dt = None
    else:
        dt = object
    return np.array(transformed, dtype=dt)
def signaltonoise(a, axis=0, ddof=0):
    """
    The signal-to-noise ratio of the input data.

    Returns the signal-to-noise ratio of `a`, here defined as the mean
    divided by the standard deviation.

    Parameters
    ----------
    a : array_like
        An array_like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction for standard deviation. Default is 0.

    Returns
    -------
    s2n : ndarray
        The mean to standard deviation ratio(s) along `axis`, or 0 where the
        standard deviation is 0.
    """
    a = np.asanyarray(a)
    mean_vals = a.mean(axis)
    std_vals = a.std(axis=axis, ddof=ddof)
    # Where the deviation vanishes, report 0 instead of dividing by zero.
    return np.where(std_vals == 0, 0, mean_vals / std_vals)
def sem(a, axis=0, ddof=1):
    """
    Calculates the standard error of the mean (or standard error of
    measurement) of the values in the input array.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees-of-freedom. How many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance. Defaults to 1.

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input axis.

    Notes
    -----
    The default value for `ddof` differs from the default (0) used by other
    ddof-accepting routines such as np.std and stats.nanstd.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.sem(a)
    array([ 2.8284,  2.8284,  2.8284,  2.8284])
    """
    a, axis = _chk_asarray(a, axis)
    n_obs = a.shape[axis]
    # Standard error = sample std / sqrt(n).
    return np.std(a, axis=axis, ddof=ddof) / np.sqrt(n_obs)
def zscore(a, axis=0, ddof=0):
    """
    Calculates the z score of each value in the sample, relative to the
    sample mean and standard deviation.

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by mean and standard deviation of input
        array `a`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).

    Examples
    --------
    >>> a = np.array([ 0.7972,  0.0767,  0.4383,  0.7866,  0.8091,  0.1954,
                       0.6307,  0.6599,  0.1065,  0.0508])
    >>> from scipy import stats
    >>> stats.zscore(a)
    array([ 1.1273, -1.247 , -0.0552,  1.0923,  1.1664, -0.8559,  0.5786,
            0.6748, -1.1488, -1.3324])
    """
    a = np.asanyarray(a)
    center = a.mean(axis=axis)
    spread = a.std(axis=axis, ddof=ddof)
    if axis and center.ndim < a.ndim:
        # Statistics were reduced along an interior axis: reinsert that
        # axis so they broadcast against the original data.
        center = np.expand_dims(center, axis=axis)
        spread = np.expand_dims(spread, axis=axis)
    return (a - center) / spread
def zmap(scores, compare, axis=0, ddof=0):
    """
    Calculates the relative z-scores.

    Returns an array of z-scores, i.e., scores that are standardized to
    zero mean and unit variance, where mean and variance are calculated
    from the comparison array.

    Parameters
    ----------
    scores : array_like
        The input for which z-scores are calculated.
    compare : array_like
        The input from which the mean and standard deviation of the
        normalization are taken; assumed to have the same dimension as
        `scores`.
    axis : int or None, optional
        Axis over which mean and variance of `compare` are calculated.
        Default is 0. If None, compute over the whole array `scores`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        Z-scores, in the same shape as `scores`.

    Notes
    -----
    ndarray subclasses are preserved (`asanyarray` is used instead of
    `asarray`), so this also works with matrices and masked arrays.
    """
    scores = np.asanyarray(scores)
    compare = np.asanyarray(compare)
    loc = compare.mean(axis=axis)
    scale = compare.std(axis=axis, ddof=ddof)
    # Re-insert the reduced axis so the comparison statistics broadcast
    # against `scores` when reducing along an axis other than 0.
    if axis and loc.ndim < compare.ndim:
        loc = np.expand_dims(loc, axis=axis)
        scale = np.expand_dims(scale, axis=axis)
    return (scores - loc) / scale
#####################################
# TRIMMING FUNCTIONS #
#####################################
def threshold(a, threshmin=None, threshmax=None, newval=0):
    """
    Clip array to a given value.

    Similar to numpy.clip(), except that values less than `threshmin` or
    greater than `threshmax` are replaced by `newval`, instead of by
    `threshmin` and `threshmax` respectively.

    Parameters
    ----------
    a : array_like
        Data to threshold.
    threshmin : float, int or None, optional
        Minimum threshold, defaults to None.
    threshmax : float, int or None, optional
        Maximum threshold, defaults to None.
    newval : float or int, optional
        Value to put in place of values in `a` outside of bounds.
        Defaults to 0.

    Returns
    -------
    out : ndarray
        The clipped input array, with values less than `threshmin` or
        greater than `threshmax` replaced with `newval`.
    """
    # Work on a copy so the caller's data is never modified in place.
    out = asarray(a).copy()
    outside = zeros(out.shape, dtype=bool)
    if threshmin is not None:
        outside |= out < threshmin
    if threshmax is not None:
        outside |= out > threshmax
    out[outside] = newval
    return out
def sigmaclip(a, low=4., high=4.):
    """
    Iterative sigma-clipping of array elements.

    The output array contains only those elements of the input array `c`
    that satisfy the conditions ::

        mean(c) - std(c)*low < c < mean(c) + std(c)*high

    Starting from the full sample, all elements outside the critical
    range are removed. The iteration continues with a new critical range
    until no elements are outside the range.

    Parameters
    ----------
    a : array_like
        Data array, will be raveled if not 1-D.
    low : float, optional
        Lower bound factor of sigma clipping. Default is 4.
    high : float, optional
        Upper bound factor of sigma clipping. Default is 4.

    Returns
    -------
    c : ndarray
        Input array with clipped elements removed.
    critlower : float
        Lower threshold value used for clipping.
    critupper : float
        Upper threshold value used for clipping.
    """
    c = np.asarray(a).ravel()
    while True:
        center = c.mean()
        spread = c.std()
        critlower = center - spread * low
        critupper = center + spread * high
        kept = c[(c > critlower) & (c < critupper)]
        # Converged once a pass removes nothing; the thresholds returned
        # are the ones computed from the final surviving sample.
        if kept.size == c.size:
            break
        c = kept
    return c, critlower, critupper
def trimboth(a, proportiontocut, axis=0):
    """
    Slices off a proportion of items from both ends of an array.

    Slices off the passed proportion of items from both ends of the passed
    array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
    rightmost 10% of scores). You must pre-sort the array if you want
    'proper' trimming. Slices off less if proportion results in a
    non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float
        Proportion (in range 0-1) of total data set to trim of each end.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    out : ndarray
        Trimmed version of array `a`.

    Raises
    ------
    ValueError
        If trimming `proportiontocut` from each end would remove
        everything (or more).

    See Also
    --------
    trim_mean
    """
    a = np.asarray(a)
    if axis is None:
        a = a.ravel()
        axis = 0
    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if (lowercut >= uppercut):
        raise ValueError("Proportion too big.")
    sl = [slice(None)] * a.ndim
    sl[axis] = slice(lowercut, uppercut)
    # Index with a tuple: indexing an ndarray with a *list* of slices is
    # deprecated and raises in modern NumPy.
    return a[tuple(sl)]
def trim1(a, proportiontocut, tail='right'):
    """
    Slices off a proportion of items from ONE end of the passed array
    distribution.

    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
    10% of scores. Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array
    proportiontocut : float
        Fraction to cut off of 'left' or 'right' of distribution
    tail : {'left', 'right'}, optional
        Defaults to 'right'.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`

    Raises
    ------
    ValueError
        If `tail` is neither 'left' nor 'right' (case-insensitive).
        Previously an invalid `tail` fell through both branches and
        raised an UnboundLocalError.
    """
    a = asarray(a)
    n = len(a)
    tail = tail.lower()
    if tail == 'right':
        lowercut = 0
        uppercut = n - int(proportiontocut * n)
    elif tail == 'left':
        lowercut = int(proportiontocut * n)
        uppercut = n
    else:
        raise ValueError("tail must be 'left' or 'right'")
    return a[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
    """
    Return mean of array after trimming distribution from both lower and
    upper tails.

    If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10%
    of scores. Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array
    proportiontocut : float
        Fraction to cut off of both tails of the distribution
    axis : int or None, optional
        Axis along which the trimmed means are computed. Default is 0.
        If None, compute over the whole array `a`.

    Returns
    -------
    trim_mean : ndarray
        Mean of trimmed array.

    See Also
    --------
    trimboth
    """
    a = np.asarray(a)
    nobs = a.size if axis is None else a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut - 1
    if lowercut > uppercut:
        raise ValueError("Proportion too big.")
    try:
        # Partial sort: only the cut points need to land in their final
        # sorted positions for the subsequent slicing to be correct.
        atmp = np.partition(a, (lowercut, uppercut), axis)
    except AttributeError:
        # np.partition is unavailable on very old NumPy; full sort instead.
        atmp = np.sort(a, axis)
    trimmed = trimboth(atmp, proportiontocut, axis=axis)
    return np.mean(trimmed, axis=axis)
def f_oneway(*args):
    """
    Performs a 1-way ANOVA.

    The one-way ANOVA tests the null hypothesis that two or more groups
    have the same population mean. The test is applied to samples from
    two or more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.

    Returns
    -------
    F-value : float
        The computed F-value of the test.
    p-value : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The ANOVA test has important assumptions that must be satisfied in
    order for the associated p-value to be valid:

    1. The samples are independent.
    2. Each sample is from a normally distributed population.
    3. The population standard deviations of the groups are all equal
       (homoscedasticity).

    If these assumptions are not true for a given set of data, it may
    still be possible to use the Kruskal-Wallis H-test
    (`scipy.stats.kruskal`) although with some loss of power.

    The algorithm is from Heiman[2], pp.394-7.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 14.
           http://faculty.vassar.edu/lowry/ch14pt1.html
    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
    """
    args = [np.asarray(sample, dtype=float) for sample in args]
    num_groups = len(args)  # ANOVA over this many groups
    alldata = np.concatenate(args)
    bign = len(alldata)
    # Grand correction term shared by the total and between-group sums.
    correction = square_of_sums(alldata) / float(bign)
    sstot = ss(alldata) - correction
    ssbn = sum(square_of_sums(sample) / float(len(sample))
               for sample in args) - correction
    sswn = sstot - ssbn
    dfbn = num_groups - 1
    dfwn = bign - num_groups
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw
    prob = special.fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf
    return f, prob
def pearsonr(x, y):
    """
    Calculates a Pearson correlation coefficient and the p-value for
    testing non-correlation.

    The Pearson correlation coefficient measures the linear relationship
    between two datasets. Strictly speaking, Pearson's correlation
    requires that each dataset be normally distributed. Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation. Correlations of -1 or +1 imply an exact
    linear relationship.

    The p-value roughly indicates the probability of an uncorrelated
    system producing datasets that have a Pearson correlation at least
    as extreme as the one computed from these datasets. The p-values are
    not entirely reliable but are probably reasonable for datasets
    larger than 500 or so.

    Parameters
    ----------
    x : (N,) array_like
        Input
    y : (N,) array_like
        Input

    Returns
    -------
    (Pearson's correlation coefficient, 2-tailed p-value)

    References
    ----------
    http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
    """
    # x and y should have the same length.
    x = np.asarray(x)
    y = np.asarray(y)
    n = len(x)
    xdev = x - x.mean()
    ydev = y - y.mean()
    r = np.add.reduce(xdev * ydev) / np.sqrt(ss(xdev) * ss(ydev))
    # |r| > 1 can only be a tiny floating-point artifact; clamp it.
    r = max(min(r, 1.0), -1.0)
    df = n - 2
    if abs(r) == 1.0:
        prob = 0.0
    else:
        t_squared = r * r * (df / ((1.0 - r) * (1.0 + r)))
        prob = betai(0.5 * df, 0.5, df / (df + t_squared))
    return r, prob
def fisher_exact(table, alternative='two-sided'):
    """Performs a Fisher exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Which alternative hypothesis to the null hypothesis the test uses.
        Default is 'two-sided'.

    Returns
    -------
    oddsratio : float
        This is prior odds ratio and not a posterior estimate.
    p_value : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.

    Notes
    -----
    The calculated odds ratio is different from the one R uses. In R language,
    this implementation returns the (more common) "unconditional Maximum
    Likelihood Estimate", while R uses the "conditional Maximum Likelihood
    Estimate".

    For tables with large numbers the (inexact) chi-square test implemented
    in the function `chi2_contingency` can also be used.

    Examples
    --------
    Say we spend a few days counting whales and sharks in the Atlantic and
    Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
    Indian ocean 2 whales and 5 sharks. Then our contingency table is::

                Atlantic  Indian
        whales     8        2
        sharks     1        5

    We use this table to find the p-value:

    >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
    >>> pvalue
    0.0349...

    The probability that we would observe this or an even more imbalanced ratio
    by chance is about 3.5%.  A commonly used significance level is 5%, if we
    adopt that we can therefore conclude that our observed imbalance is
    statistically significant; whales prefer the Atlantic while sharks prefer
    the Indian ocean.
    """
    hypergeom = distributions.hypergeom
    c = np.asarray(table, dtype=np.int64)  # int32 is not enough for the algorithm
    if not c.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")
    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")
    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is 1 and
        # the odds ratio is NaN.
        return np.nan, 1.0
    # Unconditional MLE of the odds ratio; infinite when either
    # off-diagonal cell is zero (division by zero otherwise).
    if c[1,0] > 0 and c[0,1] > 0:
        oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
    else:
        oddsratio = np.inf

    # Table margins: n1, n2 are the row sums, n is the first column sum.
    # Under the null, c[0,0] follows hypergeom(M=n1+n2, n=n1, N=n).
    n1 = c[0,0] + c[0,1]
    n2 = c[1,0] + c[1,1]
    n = c[0,0] + c[1,0]

    # NOTE: this closure reads `mode`, `pexact` and `epsilon` from the
    # enclosing scope; they are assigned in the 'two-sided' branch below
    # *before* binary_search is called.  Do not call it from elsewhere.
    def binary_search(n, n1, n2, side):
        """Binary search for where to begin lower/upper halves in two-sided
        test.
        """
        if side == "upper":
            minval = mode
            maxval = n
        else:
            minval = 0
            maxval = mode
        guess = -1
        while maxval - minval > 1:
            if maxval == minval + 1 and guess == minval:
                guess = maxval
            else:
                guess = (maxval + minval) // 2
            pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
            if side == "upper":
                ng = guess - 1
            else:
                ng = guess + 1
            # Stop when pexact lies between pmf(guess) and pmf(neighbor):
            # guess is then the boundary of the opposite tail.
            if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
                break
            elif pguess < pexact:
                maxval = guess
            else:
                minval = guess
        if guess == -1:
            guess = minval
        # Fine-tune against floating-point wobble: nudge guess so that its
        # pmf sits within a factor `epsilon` of pexact.
        if side == "upper":
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess -= 1
            while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess += 1
        else:
            while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess += 1
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess -= 1
        return guess

    if alternative == 'less':
        pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
    elif alternative == 'two-sided':
        # Mode of the hypergeometric distribution of c[0,0].
        mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
        pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
        pmode = hypergeom.pmf(mode, n1 + n2, n1, n)

        # Relative tolerance used when comparing pmf values.
        epsilon = 1 - 1e-4
        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
            # The observed cell is (numerically) at the mode: nothing is
            # more extreme, so the two-sided p-value is 1.
            return oddsratio, 1.

        elif c[0,0] < mode:
            # Observed count is in the lower tail: add the probability of
            # the matching upper tail found by binary search.
            plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
            if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, plower

            guess = binary_search(n, n1, n2, "upper")
            pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
        else:
            # Observed count is in the upper tail: symmetric case.
            pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
            if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, pupper

            guess = binary_search(n, n1, n2, "lower")
            pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
    else:
        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
        raise ValueError(msg)

    # Guard against rounding pushing the two tail sums slightly above 1.
    if pvalue > 1.0:
        pvalue = 1.0
    return oddsratio, pvalue
def spearmanr(a, b=None, axis=0):
    """
    Calculates a Spearman rank-order correlation coefficient and the
    p-value to test for non-correlation.

    The Spearman correlation is a nonparametric measure of the
    monotonicity of the relationship between two datasets. Unlike the
    Pearson correlation, the Spearman correlation does not assume that
    both datasets are normally distributed. Like other correlation
    coefficients, this one varies between -1 and +1 with 0 implying no
    correlation. Correlations of -1 or +1 imply an exact monotonic
    relationship.

    The p-value roughly indicates the probability of an uncorrelated
    system producing datasets that have a Spearman correlation at least
    as extreme as the one computed from these datasets. The p-values are
    not entirely reliable but are probably reasonable for datasets
    larger than 500 or so.

    Parameters
    ----------
    a, b : 1D or 2D array_like, b is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations. Each column of `a` and `b` represents a variable,
        and each row entry a single observation of those variables. See
        also `axis`. Both arrays need to have the same length in the
        `axis` dimension.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable,
        with observations in the rows. If axis=1, the relationship is
        transposed. If axis=None, then both arrays will be raveled.

    Returns
    -------
    rho : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only
        2 variables are given as parameters). The correlation matrix is
        square with length equal to the total number of variables
        (columns or rows) in `a` and `b` combined.
    p-value : float
        The two-sided p-value for a hypothesis test whose null
        hypothesis is that two sets of data are uncorrelated; has the
        same dimension as rho.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall:
       New York. 2000. Section 14.7.
    """
    a, axisout = _chk_asarray(a, axis)
    rank_a = np.apply_along_axis(rankdata, axisout, a)
    if b is None:
        rank_b = None
    else:
        b, axisout = _chk_asarray(b, axis)
        rank_b = np.apply_along_axis(rankdata, axisout, b)
    n = a.shape[axisout]
    # Pearson correlation of the ranks is the Spearman correlation.
    rs = np.corrcoef(rank_a, rank_b, rowvar=axisout)

    # rs can contain exact 1's, which make the t transform divide by zero;
    # silence just that warning for the duration of the computation.
    olderr = np.seterr(divide='ignore')
    try:
        t = rs * np.sqrt((n - 2) / ((rs + 1.0) * (1.0 - rs)))
    finally:
        np.seterr(**olderr)
    prob = 2 * distributions.t.sf(np.abs(t), n - 2)

    # Two 1-D inputs produce a 2x2 matrix; return the scalar off-diagonal.
    if rs.shape == (2, 2):
        return rs[1, 0], prob[1, 0]
    else:
        return rs, prob
def pointbiserialr(x, y):
    """Calculates a point biserial correlation coefficient and the
    associated p-value.

    The point biserial correlation is used to measure the relationship
    between a binary variable, x, and a continuous variable, y. Like
    other correlation coefficients, this one varies between -1 and +1
    with 0 implying no correlation. Correlations of -1 or +1 imply a
    determinative relationship.

    This function uses a shortcut formula but produces the same result
    as `pearsonr`.

    Parameters
    ----------
    x : array_like of bools
        Input array.
    y : array_like
        Input array.

    Returns
    -------
    r : float
        R value
    p-value : float
        2-tailed p-value

    References
    ----------
    http://en.wikipedia.org/wiki/Point-biserial_correlation_coefficient
    """
    x = np.asarray(x, dtype=bool)
    y = np.asarray(y, dtype=float)
    n = len(x)

    # Fraction of x values that are True.
    phat = x.sum() / float(len(x))
    mean_false = y[~x].mean()  # y-values where x is False
    mean_true = y[x].mean()    # y-values where x is True

    # phat - phat**2 equals phat*(1-phat) but is numerically more stable.
    rpb = (mean_true - mean_false) * np.sqrt(phat - phat**2) / y.std()

    df = n - 2
    # TINY guards against division by zero when |rpb| == 1
    # (see the analogous comment in pearsonr).
    TINY = 1e-20
    t = rpb * np.sqrt(df / ((1.0 - rpb + TINY) * (1.0 + rpb + TINY)))
    prob = betai(0.5 * df, 0.5, df / (df + t * t))
    return rpb, prob
def kendalltau(x, y, initial_lexsort=True):
    """
    Calculates Kendall's tau, a correlation measure for ordinal data.

    Kendall's tau is a measure of the correspondence between two rankings.
    Values close to 1 indicate strong agreement, values close to -1 indicate
    strong disagreement.  This is the tau-b version of Kendall's tau which
    accounts for ties.

    Parameters
    ----------
    x, y : array_like
        Arrays of rankings, of the same shape. If arrays are not 1-D, they will
        be flattened to 1-D.
    initial_lexsort : bool, optional
        Whether to use lexsort or quicksort as the sorting method for the
        initial sort of the inputs. Default is lexsort (True), for which
        `kendalltau` is of complexity O(n log(n)). If False, the complexity is
        O(n^2), but with a smaller pre-factor (so quicksort may be faster for
        small arrays).

    Returns
    -------
    Kendall's tau : float
       The tau statistic.
    p-value : float
       The two-sided p-value for a hypothesis test whose null hypothesis is
       an absence of association, tau = 0.

    Notes
    -----
    The definition of Kendall's tau that is used is::

      tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))

    where P is the number of concordant pairs, Q the number of discordant
    pairs, T the number of ties only in `x`, and U the number of ties only in
    `y`.  If a tie occurs for the same pair in both `x` and `y`, it is not
    added to either T or U.

    References
    ----------
    W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
    Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
    No. 314, Part 1, pp. 436-439, 1966.

    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [12, 2, 1, 12, 2]
    >>> x2 = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.kendalltau(x1, x2)
    >>> tau
    -0.47140452079103173
    >>> p_value
    0.24821309157521476
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    if not x.size or not y.size:
        return (np.nan, np.nan)  # Return NaN if arrays are empty
    n = np.int64(len(x))
    temp = list(range(n))  # support structure used by mergesort
    # this closure recursively sorts sections of perm[] by comparing
    # elements of y[perm[]] using temp[] as support
    # returns the number of swaps required by an equivalent bubble sort
    def mergesort(offs, length):
        exchcnt = 0
        if length == 1:
            return 0
        if length == 2:
            # Base case: swap the pair if out of order (one "exchange").
            if y[perm[offs]] <= y[perm[offs+1]]:
                return 0
            t = perm[offs]
            perm[offs] = perm[offs+1]
            perm[offs+1] = t
            return 1
        length0 = length // 2
        length1 = length - length0
        middle = offs + length0
        exchcnt += mergesort(offs, length0)
        exchcnt += mergesort(middle, length1)
        # Halves already in order relative to each other: nothing to merge.
        if y[perm[middle - 1]] < y[perm[middle]]:
            return exchcnt
        # merging
        i = j = k = 0
        while j < length0 or k < length1:
            if k >= length1 or (j < length0 and y[perm[offs + j]] <=
                                                y[perm[middle + k]]):
                temp[i] = perm[offs + j]
                # d counts how many right-half elements this left-half
                # element jumped over, i.e. bubble-sort exchanges.
                d = i - j
                j += 1
            else:
                temp[i] = perm[middle + k]
                d = (offs + i) - (middle + k)
                k += 1
            if d > 0:
                exchcnt += d
            i += 1
        perm[offs:offs+length] = temp[0:length]
        return exchcnt
    # initial sort on values of x and, if tied, on values of y
    if initial_lexsort:
        # sort implemented as mergesort, worst case: O(n log(n))
        perm = np.lexsort((y, x))
    else:
        # sort implemented as quicksort, 30% faster but with worst case: O(n^2)
        perm = list(range(n))
        perm.sort(key=lambda a: (x[a], y[a]))
    # compute joint ties: t = sum of m*(m-1)/2 over each group tied in
    # both x and y simultaneously
    # NOTE(review): `xrange` is assumed to be a Py2/3 compat alias
    # imported at module top (e.g. from scipy.lib.six) -- confirm.
    first = 0
    t = 0
    for i in xrange(1, n):
        if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
            t += ((i - first) * (i - first - 1)) // 2
            first = i
    t += ((n - first) * (n - first - 1)) // 2
    # compute ties in x: u = sum of m*(m-1)/2 over each group tied in x
    first = 0
    u = 0
    for i in xrange(1, n):
        if x[perm[first]] != x[perm[i]]:
            u += ((i - first) * (i - first - 1)) // 2
            first = i
    u += ((n - first) * (n - first - 1)) // 2
    # count exchanges (number of discordant pairs, found while re-sorting
    # perm by y; perm is mutated in place by the closure above)
    exchanges = mergesort(0, n)
    # compute ties in y after mergesort with counting
    first = 0
    v = 0
    for i in xrange(1, n):
        if y[perm[first]] != y[perm[i]]:
            v += ((i - first) * (i - first - 1)) // 2
            first = i
    v += ((n - first) * (n - first - 1)) // 2
    # tot = total number of pairs n*(n-1)/2
    tot = (n * (n - 1)) // 2
    if tot == u or tot == v:
        return np.nan, np.nan  # Special case for all ties in both ranks
    # Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
    denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
    # tau-b: (concordant - discordant) / sqrt((tot - u) * (tot - v))
    tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom
    # what follows reproduces the ending of Gary Strangman's original
    # stats.kendalltau() in SciPy: normal approximation for the p-value,
    # with 1.4142136 approximating sqrt(2)
    svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
    z = tau / np.sqrt(svar)
    prob = special.erfc(np.abs(z) / 1.4142136)
    return tau, prob
def linregress(x, y=None):
    """
    Calculate a regression line

    This computes a least-squares regression for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        Two sets of measurements. Both arrays should have the same
        length. If only `x` is given (and ``y=None``), then it must be a
        two-dimensional array where one dimension has length 2. The two
        sets of measurements are then found by splitting the array along
        the length-2 dimension.

    Returns
    -------
    slope : float
        slope of the regression line
    intercept : float
        intercept of the regression line
    r-value : float
        correlation coefficient
    p-value : float
        two-sided p-value for a hypothesis test whose null hypothesis is
        that the slope is zero.
    stderr : float
        Standard error of the estimate
    """
    # TINY keeps the t-statistic finite when |r| == 1 exactly.
    TINY = 1.0e-20
    if y is None:
        # A single (2, N) or (N, 2) array holds both measurement sets.
        x = asarray(x)
        if x.shape[0] == 2:
            x, y = x
        elif x.shape[1] == 2:
            x, y = x.T
        else:
            msg = ("If only `x` is given as input, it has to be of shape "
                   "(2, N) or (N, 2), provided shape was %s" % str(x.shape))
            raise ValueError(msg)
    else:
        x = asarray(x)
        y = asarray(y)
    n = len(x)
    xmean = np.mean(x, None)
    ymean = np.mean(y, None)

    # Biased covariance matrix gives the average sums of squares
    # (x-variance, covariance, covariance, y-variance).
    ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
    r_den = np.sqrt(ssxm * ssym)
    r = 0.0 if r_den == 0.0 else ssxym / r_den

    # Clamp tiny numerical overshoots outside [-1, 1].
    r = max(-1.0, min(1.0, r))

    df = n - 2
    t = r * np.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
    prob = 2 * distributions.t.sf(np.abs(t), df)
    slope = ssxym / ssxm
    intercept = ymean - slope * xmean
    sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
    return slope, intercept, r, prob, sterrest
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
y = np.asarray(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.asarray(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = find_repeats(x)
_, nyreps = find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0):
    """
    Calculates the T-test for the mean of ONE group of scores.

    Two-sided test of the null hypothesis that the expected value (mean)
    of a sample of independent observations `a` equals the given
    population mean, `popmean`.

    Parameters
    ----------
    a : array_like
        Sample observations.
    popmean : float or array_like
        Expected value under the null hypothesis; if array_like, it must
        have the same shape as `a` excluding the axis dimension.
    axis : int or None, optional
        Axis along which to compute the test.  If None, compute over the
        whole array `a`.

    Returns
    -------
    t : float or array
        t-statistic.
    prob : float or array
        Two-tailed p-value.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(7654567)  # fix seed to get the same result
    >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
    >>> stats.ttest_1samp(rvs, 5.0)
    (array([-0.68014479, -0.04323899]), array([ 0.49961383,  0.96568674]))
    >>> stats.ttest_1samp(rvs, 0.0)
    (array([ 2.77025808,  4.11038784]), array([ 0.00789095,  0.00014999]))
    """
    sample, axis = _chk_asarray(a, axis)
    nobs = sample.shape[axis]
    dof = nobs - 1
    # Deviation of the sample mean from the hypothesized mean.
    diff = np.mean(sample, axis) - popmean
    # Standard error of the mean, using the unbiased variance estimate.
    stderr = np.sqrt(np.var(sample, axis, ddof=1) / float(nobs))
    statistic = np.divide(diff, stderr)
    return _ttest_finish(dof, statistic)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    """Turn precomputed means and denominator into a (t, prob) pair."""
    statistic = np.divide(mean1 - mean2, denom)
    return _ttest_finish(df, statistic)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True):
    """
    T-test for means of two independent samples from descriptive statistics.

    Two-sided test of the null hypothesis that two independent samples
    have identical average (expected) values.

    Parameters
    ----------
    mean1 : array_like
        The mean(s) of sample 1.
    std1 : array_like
        The standard deviation(s) of sample 1.
    nobs1 : array_like
        The number(s) of observations of sample 1.
    mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The standard deviation(s) of sample 2.
    nobs2 : array_like
        The number(s) of observations of sample 2.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.  If False, perform
        Welch's t-test, which does not assume equal population variance
        [2]_.

    Returns
    -------
    t : float or array
        The calculated t-statistics.
    prob : float or array
        The two-tailed p-value.

    See also
    --------
    scipy.stats.ttest_ind

    Notes
    -----
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
    """
    # The denominator helpers work with variances, not standard deviations.
    var1 = std1**2
    var2 = std2**2
    if equal_var:
        df, denom = _equal_var_ttest_denom(var1, nobs1, var2, nobs2)
    else:
        df, denom = _unequal_var_ttest_denom(var1, nobs1, var2, nobs2)
    return _ttest_ind_from_stats(mean1, mean2, denom, df)
def ttest_ind(a, b, axis=0, equal_var=True):
    """
    Calculates the T-test for the means of TWO INDEPENDENT samples of scores.

    Two-sided test of the null hypothesis that two independent samples
    have identical average (expected) values.  By default the test
    assumes that the populations have identical variances.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int or None, optional
        Axis along which to compute the test.  If None, compute over the
        whole arrays, `a`, and `b`.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.  If False, perform
        Welch's t-test, which does not assume equal population variance
        [2]_.

        .. versionadded:: 0.11.0

    Returns
    -------
    t : float or array
        The calculated t-statistic.
    prob : float or array
        The two-tailed p-value.

    Notes
    -----
    Use this test when two independent samples are observed (e.g. exam
    scores of boys and girls, or of two ethnic groups) and the question
    is whether the average (expected) value differs significantly across
    samples.  A large p-value (e.g. above 0.05 or 0.1) means the null
    hypothesis of identical averages cannot be rejected; a p-value below
    the chosen threshold (e.g. 1%, 5% or 10%) means it is rejected.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500)
    >>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500)
    >>> stats.ttest_ind(rvs1, rvs2)
    (0.26833823296239279, 0.78849443369564776)
    >>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
    (0.26833823296239279, 0.78849452749500748)
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    # Degenerate input: either sample is empty.
    if a.size == 0 or b.size == 0:
        return np.nan, np.nan
    nobs1 = a.shape[axis]
    nobs2 = b.shape[axis]
    var1 = np.var(a, axis, ddof=1)
    var2 = np.var(b, axis, ddof=1)
    if equal_var:
        df, denom = _equal_var_ttest_denom(var1, nobs1, var2, nobs2)
    else:
        df, denom = _unequal_var_ttest_denom(var1, nobs1, var2, nobs2)
    return _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis),
                                 denom, df)
def ttest_rel(a, b, axis=0):
    """
    Calculates the T-test on TWO RELATED samples of scores, a and b.

    Two-sided test of the null hypothesis that two related or repeated
    samples have identical average (expected) values.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape.
    axis : int or None, optional
        Axis along which to compute the test.  If None, compute over the
        whole arrays, `a`, and `b`.

    Returns
    -------
    t : float or array
        t-statistic.
    prob : float or array
        Two-tailed p-value.

    Notes
    -----
    Typical uses are scores of the same set of students in different
    exams, or repeated sampling from the same units.  A large p-value
    (e.g. above 0.05 or 0.1) means the null hypothesis of identical
    average scores cannot be rejected; a p-value below the threshold
    (e.g. 1%, 5% or 10%) means it is rejected.  Small p-values are
    associated with large t-statistics.

    References
    ----------
    http://en.wikipedia.org/wiki/T-test#Dependent_t-test

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)  # fix random seed to get same numbers
    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500)
    >>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500) +
    ...         stats.norm.rvs(scale=0.2, size=500))
    >>> stats.ttest_rel(rvs1, rvs2)
    (0.24101764965300962, 0.80964043445811562)
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    if a.shape[axis] != b.shape[axis]:
        raise ValueError('unequal length arrays')
    # Degenerate input: nothing to test.
    if a.size == 0 or b.size == 0:
        return np.nan, np.nan
    n = a.shape[axis]
    # Paired differences; cast to float64 to avoid integer truncation.
    diffs = (a - b).astype(np.float64)
    denom = np.sqrt(np.var(diffs, axis, ddof=1) / float(n))
    statistic = np.divide(np.mean(diffs, axis), denom)
    return _ttest_finish(float(n - 1), statistic)
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
    """
    Perform the Kolmogorov-Smirnov test for goodness of fit.
    This performs a test of the distribution G(x) of an observed
    random variable against a given distribution F(x). Under the null
    hypothesis the two distributions are identical, G(x)=F(x). The
    alternative hypothesis can be either 'two-sided' (default), 'less'
    or 'greater'. The KS test is only valid for continuous distributions.
    Parameters
    ----------
    rvs : str, array or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If an array, it should be a 1-D array of observations of random
        variables.
        If a callable, it should be a function to generate random variables;
        it is required to have a keyword argument `size`.
    cdf : str or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If `rvs` is a string then `cdf` can be False or the same as `rvs`.
        If a callable, that callable is used to calculate the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used if `rvs` or `cdf` are strings.
    N : int, optional
        Sample size if `rvs` is string or callable. Default is 20.
    alternative : {'two-sided', 'less','greater'}, optional
        Defines the alternative hypothesis (see explanation above).
        Default is 'two-sided'.
    mode : 'approx' (default) or 'asymp', optional
        Defines the distribution used for calculating the p-value.
        - 'approx' : use approximation to exact distribution of test statistic
        - 'asymp' : use asymptotic distribution of test statistic
    Returns
    -------
    D : float
        KS test statistic, either D, D+ or D-.
    p-value : float
        One-tailed or two-tailed p-value.
    Notes
    -----
    In the one-sided test, the alternative is that the empirical
    cumulative distribution function of the random variable is "less"
    or "greater" than the cumulative distribution function F(x) of the
    hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
    Examples
    --------
    >>> from scipy import stats
    >>> x = np.linspace(-15, 15, 9)
    >>> stats.kstest(x, 'norm')
    (0.44435602715924361, 0.038850142705171065)
    >>> np.random.seed(987654321) # set random seed to get the same result
    >>> stats.kstest('norm', False, N=100)
    (0.058352892479417884, 0.88531190944151261)
    The above lines are equivalent to:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.norm.rvs(size=100), 'norm')
    (0.058352892479417884, 0.88531190944151261)
    *Test against one-sided alternative hypothesis*
    Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
    >>> np.random.seed(987654321)
    >>> x = stats.norm.rvs(loc=0.2, size=100)
    >>> stats.kstest(x,'norm', alternative = 'less')
    (0.12464329735846891, 0.040989164077641749)
    Reject equal distribution against alternative hypothesis: less
    >>> stats.kstest(x,'norm', alternative = 'greater')
    (0.0072115233216311081, 0.98531158590396395)
    Don't reject equal distribution against alternative hypothesis: greater
    >>> stats.kstest(x,'norm', mode='asymp')
    (0.12464329735846891, 0.08944488871182088)
    *Testing t distributed random variables against normal distribution*
    With 100 degrees of freedom the t distribution looks close to the normal
    distribution, and the K-S test does not reject the hypothesis that the
    sample came from the normal distribution:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(100,size=100),'norm')
    (0.072018929165471257, 0.67630062862479168)
    With 3 degrees of freedom the t distribution looks sufficiently different
    from the normal distribution, that we can reject the hypothesis that the
    sample came from the normal distribution at the 10% level:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(3,size=100),'norm')
    (0.131016895759829, 0.058826222555312224)
    """
    if isinstance(rvs, string_types):
        # `rvs` names a scipy.stats distribution: resolve both the sampler
        # and (unless given separately) the cdf from that distribution.
        if (not cdf) or (cdf == rvs):
            cdf = getattr(distributions, rvs).cdf
            rvs = getattr(distributions, rvs).rvs
        else:
            raise AttributeError("if rvs is string, cdf has to be the "
                                 "same distribution")
    if isinstance(cdf, string_types):
        cdf = getattr(distributions, cdf).cdf
    if callable(rvs):
        # Draw a fresh sample of size N from the callable.
        kwds = {'size': N}
        vals = np.sort(rvs(*args, **kwds))
    else:
        # `rvs` is the data itself; N is taken from the data length.
        vals = np.sort(rvs)
        N = len(vals)
    cdfvals = cdf(vals, *args)
    # to not break compatibility with existing code
    if alternative == 'two_sided':
        alternative = 'two-sided'
    if alternative in ['two-sided', 'greater']:
        # D+: largest amount by which the empirical CDF exceeds F(x).
        Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
        if alternative == 'greater':
            return Dplus, distributions.ksone.sf(Dplus, N)
    if alternative in ['two-sided', 'less']:
        # D-: largest amount by which F(x) exceeds the empirical CDF.
        Dmin = (cdfvals - np.arange(0.0, N)/N).max()
        if alternative == 'less':
            return Dmin, distributions.ksone.sf(Dmin, N)
    if alternative == 'two-sided':
        D = np.max([Dplus, Dmin])
        if mode == 'asymp':
            return D, distributions.kstwobign.sf(D * np.sqrt(N))
        if mode == 'approx':
            pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
            # For large N, or when the asymptotic p-value is not small,
            # the asymptotic distribution is accurate enough; otherwise
            # use twice the one-sided exact distribution instead.
            if N > 2666 or pval_two > 0.80 - N*0.3/1000:
                return D, distributions.kstwobign.sf(D * np.sqrt(N))
            else:
                return D, 2 * distributions.ksone.sf(D, N)
    # NOTE(review): an unrecognized `alternative` or `mode` falls through
    # all branches and returns None implicitly -- presumably unintended;
    # confirm before relying on this behavior.
# Map from names to lambda_ values used in power_divergence().
# Each key is a string accepted for the `lambda_` argument; the value is
# the corresponding exponent in the Cressie-Read power divergence family.
_power_div_lambda_names = {
    "pearson": 1,
    "log-likelihood": 0,
    "freeman-tukey": -0.5,
    "mod-log-likelihood": -1,
    "neyman": -2,
    "cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """
    Cressie-Read power divergence statistic and goodness of fit test.
    This function tests the null hypothesis that the categorical data
    has the given frequencies, using the Cressie-Read power divergence
    statistic.
    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the categories are
        assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  The default value of `ddof`
        is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along which to
        apply the test.  If axis is None, all values in `f_obs` are treated
        as a single data set.  Default is 0.
    lambda_ : float or str, optional
        `lambda_` gives the power in the Cressie-Read power divergence
        statistic.  The default is 1.  For convenience, `lambda_` may be
        assigned one of the following strings, in which case the
        corresponding numerical value is used::
            String              Value   Description
            "pearson"             1     Pearson's chi-squared statistic.
                                        In this case, the function is
                                        equivalent to `stats.chisquare`.
            "log-likelihood"      0     Log-likelihood ratio. Also known as
                                        the G-test [3]_.
            "freeman-tukey"      -1/2   Freeman-Tukey statistic.
            "mod-log-likelihood" -1     Modified log-likelihood ratio.
            "neyman"             -2     Neyman's statistic.
            "cressie-read"        2/3   The power recommended in [5]_.
    Returns
    -------
    stat : float or ndarray
        The Cressie-Read power divergence test statistic.  The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `stat` are scalars.
    See Also
    --------
    chisquare
    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small.  A typical rule is that all of the observed
    and expected frequencies should be at least 5.
    When `lambda_` is less than zero, the formula for the statistic involves
    dividing by `f_obs`, so a warning or error may be generated if any value
    in `f_obs` is 0.
    Similarly, a warning or error may be generated if any value in `f_exp` is
    zero when `lambda_` >= 0.
    The default degrees of freedom, k-1, are for the case when no parameters
    of the distribution are estimated.  If p parameters are estimated by
    efficient maximum likelihood then the correct degrees of freedom are
    k-1-p.  If the parameters are estimated in a different way, then the
    dof can be between k-1-p and k-1.  However, it is also possible that
    the asymptotic distribution is not a chisquare, in which case this
    test is not appropriate.
    This function handles masked arrays.  If an element of `f_obs` or `f_exp`
    is masked, then data at that position is ignored, and does not count
    towards the size of the data set.
    .. versionadded:: 0.13.0
    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
    .. [3] "G-test", http://en.wikipedia.org/wiki/G-test
    .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
           practice of statistics in biological research", New York: Freeman
           (1981)
    .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.
    Examples
    --------
    (See `chisquare` for more examples.)
    When just `f_obs` is given, it is assumed that the expected frequencies
    are uniform and given by the mean of the observed frequencies.  Here we
    perform a G-test (i.e. use the log-likelihood ratio statistic):
    >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
    (2.006573162632538, 0.84823476779463769)
    The expected frequencies can be given with the `f_exp` argument:
    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[16, 16, 16, 16, 16, 8],
    ...                  lambda_='log-likelihood')
    (3.5, 0.62338762774958223)
    When `f_obs` is 2-D, by default the test is applied to each column.
    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
    >>> obs.shape
    (6, 2)
    >>> power_divergence(obs, lambda_="log-likelihood")
    (array([ 2.00657316,  6.77634498]), array([ 0.84823477,  0.23781225]))
    By setting ``axis=None``, the test is applied to all data in the array,
    which is equivalent to applying the test to the flattened array.
    >>> power_divergence(obs, axis=None)
    (23.31034482758621, 0.015975692534127565)
    >>> power_divergence(obs.ravel())
    (23.31034482758621, 0.015975692534127565)
    `ddof` is the change to make to the default degrees of freedom.
    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
    (2.0, 0.73575888234288467)
    The calculation of the p-values is done by broadcasting the
    test statistic with `ddof`.
    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))
    `f_obs` and `f_exp` are also broadcast.  In the following, `f_obs` has
    shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
    `f_obs` and `f_exp` has shape (2, 6).  To compute the desired chi-squared
    statistics, we must use ``axis=1``:
    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[[16, 16, 16, 16, 16, 8],
    ...                         [8, 20, 20, 16, 12, 12]],
    ...                  axis=1)
    (array([ 3.5 ,  9.25]), array([ 0.62338763,  0.09949846]))
    """
    # Convert the input argument `lambda_` to a numerical value.
    if isinstance(lambda_, string_types):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
                             "are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        # Default is Pearson's chi-squared statistic (lambda_ = 1).
        lambda_ = 1
    f_obs = np.asanyarray(f_obs)
    if f_exp is not None:
        f_exp = np.atleast_1d(np.asanyarray(f_exp))
    else:
        # Compute the equivalent of
        # f_exp = f_obs.mean(axis=axis, keepdims=True)
        # Older versions of numpy do not have the 'keepdims' argument, so
        # we have to do a little work to achieve the same result.
        # Ignore 'invalid' errors so the edge case of a data set with length 0
        # is handled without spurious warnings.
        with np.errstate(invalid='ignore'):
            f_exp = np.atleast_1d(f_obs.mean(axis=axis))
        if axis is not None:
            # Re-insert the reduced axis with length 1 so that `f_exp`
            # broadcasts against `f_obs`.
            reduced_shape = list(f_obs.shape)
            reduced_shape[axis] = 1
            f_exp.shape = reduced_shape
    # `terms` is the array of terms that are summed along `axis` to create
    # the test statistic.  We use some specialized code for a few special
    # cases of lambda_.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test)
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)
    stat = terms.sum(axis=axis)
    # Masked entries are excluded by _count, so the degrees of freedom
    # reflect only the unmasked data.
    num_obs = _count(terms, axis=axis)
    ddof = asarray(ddof)
    p = chisqprob(stat, num_obs - 1 - ddof)
    return stat, p
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """
    Calculates a one-way chi square test.

    Tests the null hypothesis that the categorical data has the given
    frequencies.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the
        categories are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  The default value of
        `ddof` is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test.  If axis is None, all values in `f_obs`
        are treated as a single data set.  Default is 0.

    Returns
    -------
    chisq : float or ndarray
        The chi-squared test statistic.  The value is a float if `axis`
        is None or `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `chisq` are scalars.

    See Also
    --------
    power_divergence
    mstats.chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in
    each category are too small.  A typical rule is that all of the
    observed and expected frequencies should be at least 5.

    The default degrees of freedom, k-1, are for the case when no
    parameters of the distribution are estimated.  If p parameters are
    estimated by efficient maximum likelihood then the correct degrees
    of freedom are k-1-p.  If the parameters are estimated in a
    different way, then the dof can be between k-1-p and k-1.  However,
    it is also possible that the asymptotic distribution is not a
    chisquare, in which case this test is not appropriate.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test

    Examples
    --------
    When just `f_obs` is given, it is assumed that the expected
    frequencies are uniform and given by the mean of the observed
    frequencies.

    >>> chisquare([16, 18, 16, 14, 12, 12])
    (2.0, 0.84914503608460956)

    With `f_exp` the expected frequencies can be given.

    >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
    (3.5, 0.62338762774958223)

    By setting ``axis=None``, the test is applied to all data in the
    array, which is equivalent to applying the test to the flattened
    array.

    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
    >>> chisquare(obs, axis=None)
    (23.31034482758621, 0.015975692534127565)

    `ddof` is the change to make to the default degrees of freedom.

    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
    (2.0, 0.73575888234288467)
    """
    # Pearson's chi-squared statistic is the lambda_ = 1 member of the
    # Cressie-Read power divergence family.
    stat, p = power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
                               lambda_="pearson")
    return stat, p
def ks_2samp(data1, data2):
    """
    Computes the Kolmogorov-Smirnov statistic on 2 samples.

    This is a two-sided test for the null hypothesis that 2 independent
    samples are drawn from the same continuous distribution.

    Parameters
    ----------
    data1, data2 : sequence of 1-D ndarrays
        two arrays of sample observations assumed to be drawn from a
        continuous distribution, sample sizes can be different

    Returns
    -------
    D : float
        KS statistic
    p-value : float
        two-tailed p-value

    Notes
    -----
    This tests whether 2 samples are drawn from the same distribution. Note
    that, like in the case of the one-sample K-S test, the distribution is
    assumed to be continuous.

    This is the two-sided test, one-sided tests are not implemented.
    The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.

    If the K-S statistic is small or the p-value is high, then we cannot
    reject the hypothesis that the distributions of the two samples
    are the same.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)  # fix random seed to get the same result
    >>> n1 = 200  # size of first sample
    >>> n2 = 300  # size of second sample

    For a different distribution, we can reject the null hypothesis since the
    pvalue is below 1%:

    >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
    >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
    >>> stats.ks_2samp(rvs1, rvs2)
    (0.20833333333333337, 4.6674975515806989e-005)

    For an identical distribution, we cannot reject the null hypothesis since
    the p-value is high, 41%:

    >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
    >>> stats.ks_2samp(rvs1, rvs4)
    (0.07999999999999996, 0.41126949729859719)
    """
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    data_all = np.concatenate([data1, data2])
    # Empirical CDFs of both samples, evaluated at every pooled observation.
    cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
    cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
    # KS statistic: largest absolute (not signed) distance between the CDFs.
    d = np.max(np.absolute(cdf1 - cdf2))
    en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # Conservative fallback if the asymptotic distribution cannot be
        # evaluated.  Previously this was a bare `except:`, which would
        # also have swallowed KeyboardInterrupt/SystemExit.
        prob = 1.0
    return d, prob
def mannwhitneyu(x, y, use_continuity=True):
    """
    Computes the Mann-Whitney rank test on samples x and y.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into
        account. Default is True.

    Returns
    -------
    u : float
        The Mann-Whitney statistics.
    prob : float
        One-sided p-value assuming a asymptotic normal distribution.

    Raises
    ------
    ValueError
        If all pooled values are identical (the tie correction is zero
        and the normal approximation is undefined).

    Notes
    -----
    Use only when the number of observation in each sample is > 20 and
    you have 2 independent samples of ranks. Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.

    This test corrects for ties and by default uses a continuity correction.
    The reported p-value is for a one-sided hypothesis, to get the two-sided
    p-value multiply the returned p-value by 2.
    """
    x = asarray(x)
    y = asarray(y)
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(np.concatenate((x, y)))
    rankx = ranked[0:n1]  # get the x-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0)  # calc U for x
    u2 = n1*n2 - u1  # remainder is U for y
    bigu = max(u1, u2)
    smallu = min(u1, u2)
    T = tiecorrect(ranked)
    if T == 0:
        # Fix: the error message previously read 'amannwhitneyu' (typo).
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
    if use_continuity:
        # normal approximation for prob calc with continuity correction
        z = abs((bigu - 0.5 - n1*n2/2.0) / sd)
    else:
        z = abs((bigu - n1*n2/2.0) / sd)  # normal approximation for prob calc
    return smallu, distributions.norm.sf(z)
def ranksums(x, y):
    """
    Compute the Wilcoxon rank-sum statistic for two samples.

    The Wilcoxon rank-sum test tests the null hypothesis that two sets
    of measurements are drawn from the same distribution.  The
    alternative hypothesis is that values in one sample are more likely
    to be larger than the values in the other sample.

    This test should be used to compare two samples from continuous
    distributions.  It does not handle ties between measurements in x
    and y.  For tie-handling and an optional continuity correction see
    `scipy.stats.mannwhitneyu`.

    Parameters
    ----------
    x,y : array_like
        The data from the two samples.

    Returns
    -------
    z-statistic : float
        The test statistic under the large-sample approximation that
        the rank sum statistic is normally distributed.
    p-value : float
        The two-sided p-value of the test.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
    """
    x, y = map(np.asarray, (x, y))
    n1 = len(x)
    n2 = len(y)
    # Rank the pooled data; the first n1 ranks belong to `x`.
    pooled_ranks = rankdata(np.concatenate((x, y)))
    rank_sum = np.sum(pooled_ranks[:n1], axis=0)
    # Mean and standard deviation of the rank sum under the null hypothesis.
    mu = n1 * (n1 + n2 + 1) / 2.0
    sigma = np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
    z = (rank_sum - mu) / sigma
    # Two-sided p-value from the normal approximation.
    return z, 2 * distributions.norm.sf(abs(z))
def kruskal(*args):
    """
    Compute the Kruskal-Wallis H-test for independent samples

    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal.  It is a non-parametric analogue
    of one-way ANOVA and works on 2 or more independent samples of possibly
    different sizes.  Rejecting the null hypothesis does not indicate which
    group differs; post-hoc comparisons are needed for that.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments.

    Returns
    -------
    H-statistic : float
        The Kruskal-Wallis H statistic, corrected for ties
    p-value : float
        The p-value for the test using the assumption that H has a chi
        square distribution

    Notes
    -----
    Because H is assumed chi-square distributed, each group should not be
    too small; a typical rule is at least 5 measurements per sample.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
    """
    groups = [np.asarray(arg) for arg in args]
    num_groups = len(groups)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")

    sizes = np.asarray([len(g) for g in groups])
    pooled_ranks = rankdata(np.concatenate(groups))
    tie_factor = tiecorrect(pooled_ranks)
    if tie_factor == 0:
        raise ValueError('All numbers are identical in kruskal')

    # Sum of (rank-sum)^2 / n over the groups; offsets delimit each
    # group's slice inside the pooled rank array.
    offsets = np.insert(np.cumsum(sizes), 0, 0)
    ssbn = 0
    for start, stop, size in zip(offsets[:-1], offsets[1:], sizes):
        ssbn += square_of_sums(pooled_ranks[start:stop]) / float(size)

    total_n = np.sum(sizes)
    h = 12.0 / (total_n * (total_n + 1)) * ssbn - 3 * (total_n + 1)
    h /= tie_factor  # tie correction
    return h, chisqprob(h, num_groups - 1)
def friedmanchisquare(*args):
    """
    Computes the Friedman test for repeated measurements

    Tests the null hypothesis that repeated measurements of the same
    individuals have the same distribution, e.g. to check whether two or
    more measurement techniques applied to the same individuals are
    consistent.

    Parameters
    ----------
    measurements1, measurements2, measurements3... : array_like
        Arrays of measurements.  All of the arrays must have the same number
        of elements.  At least 3 sets of measurements must be given.

    Returns
    -------
    friedman chi-square statistic : float
        the test statistic, correcting for ties
    p-value : float
        the associated p-value assuming that the test statistic has a chi
        squared distribution

    Notes
    -----
    The chi-squared approximation is only reliable for n > 10 and more than
    6 repeated measurements.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Friedman_test
    """
    k = len(args)
    if k < 3:
        raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')

    n = len(args[0])
    for measurements in args[1:]:
        if len(measurements) != n:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')

    # Rank the k measurements within each of the n rows.
    data = np.vstack(args).T.astype(float)
    for row in range(len(data)):
        data[row] = rankdata(data[row])

    # Tie correction factor: sum t*(t^2-1) over every group of tied ranks.
    ties = 0
    for row in range(len(data)):
        replist, repnum = find_repeats(array(data[row]))
        for t in repnum:
            ties += t * (t*t - 1)
    c = 1 - ties / float(k*(k*k - 1)*n)

    ssbn = np.sum(data.sum(axis=0)**2)
    chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
    return chisq, chisqprob(chisq, k - 1)
def combine_pvalues(pvalues, method='fisher', weights=None):
    """
    Methods for combining the p-values of independent tests bearing upon the
    same hypothesis.

    Parameters
    ----------
    pvalues : array_like, 1-D
        Array of p-values assumed to come from independent tests.
    method : {'fisher', 'stouffer'}, optional
        Name of method to use to combine p-values. The following methods are
        available:
        - "fisher": Fisher's method (Fisher's combined probability test),
          the default.
        - "stouffer": Stouffer's Z-score method.
    weights : array_like, 1-D, optional
        Optional array of weights used only for Stouffer's Z-score method.

    Returns
    -------
    statistic: float
        The statistic calculated by the specified method:
        - "fisher": The chi-squared statistic
        - "stouffer": The Z-score
    pval: float
        The combined p-value.

    Notes
    -----
    Fisher's method (also known as Fisher's combined probability test) [1]_ uses
    a chi-squared statistic to compute a combined p-value. The closely related
    Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
    advantage of Stouffer's method is that it is straightforward to introduce
    weights, which can make Stouffer's method more powerful than Fisher's
    method when the p-values are from studies of different size [3]_ [4]_.
    Fisher's method may be extended to combine p-values from dependent tests
    [5]_. Extensions such as Brown's method and Kost's method are not currently
    implemented.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
    .. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
    .. [3] Whitlock, M. C. "Combining probability from independent tests: the
           weighted Z-method is superior to Fisher's approach." Journal of
           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
    .. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
           for combining probabilities in meta-analysis." Journal of
           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
    .. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")

    if method == 'fisher':
        # Under H0, -2*sum(log(p_i)) follows a chi-squared distribution
        # with 2*k degrees of freedom.
        Xsq = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
        return (Xsq, pval)
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")

        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")

        # Weighted sum of z-scores, renormalized so Z ~ N(0, 1) under H0.
        Zi = distributions.norm.isf(pvalues)
        Z = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(Z)
        return (Z, pval)
    else:
        # Bug fix: the method name was previously passed as a *second
        # argument* to ValueError ("...%s...", method) instead of being
        # %-formatted into the message string.
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
def chisqprob(chisq, df):
    """
    Probability value (1-tail) for the Chi^2 probability distribution.

    Broadcasting rules apply.

    Parameters
    ----------
    chisq : array_like or float > 0
    df : array_like or float, probably int >= 1

    Returns
    -------
    chisqprob : ndarray
        The area from `chisq` to infinity under the Chi^2 probability
        distribution with degrees of freedom `df`.
    """
    # chdtrc is the complemented (upper-tail) chi-squared CDF, computed
    # in C by scipy.special; note its argument order is (df, x).
    return special.chdtrc(df, chisq)
def betai(a, b, x):
    """
    Returns the incomplete beta function.

    I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The standard broadcasting rules apply to a, b, and x.

    Parameters
    ----------
    a : array_like or float > 0
    b : array_like or float > 0
    x : array_like or float
        x will be clipped to be no greater than 1.0 .

    Returns
    -------
    betai : ndarray
        Incomplete beta function.
    """
    x = np.asarray(x)
    # Clamp x at the upper integration limit; anything >= 1 yields 1.0.
    # (np.where rather than np.minimum keeps NaN inputs mapping to 1.0,
    # matching the historical behavior of this function.)
    x = np.where(x < 1.0, x, 1.0)
    return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivariate data, per
    Maxwell & Delaney p.657.
    """
    # Promote scalar error terms to 1x1 matrices so det() applies.
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    lmbda = linalg.det(EF) / linalg.det(ER)
    adf = (a - 1)**2
    bdf = (b - 1)**2
    # q degenerates to 1 exactly when the denominator below would be 0.
    if adf + bdf == 5:
        q = 1
    else:
        q = np.sqrt((adf * bdf - 2) / (adf + bdf - 5))
    root = lmbda**(1.0/q)
    numerator = (1 - root) * (a - 1) * (b - 1)
    denominator = root / (numerator * q - 0.5 * (a - 1) * (b - 1) + 1)
    return numerator / denominator
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
def f_value_multivariate(ER, EF, dfnum, dfden):
    """
    Returns a multivariate F-statistic.

    Parameters
    ----------
    ER : ndarray
        Error associated with the null hypothesis (the Restricted model).
        From a multivariate F calculation.
    EF : ndarray
        Error associated with the alternate hypothesis (the Full model)
        From a multivariate F calculation.
    dfnum : int
        Degrees of freedom the Restricted model.
    dfden : int
        Degrees of freedom associated with the Restricted model.

    Returns
    -------
    fstat : float
        The computed F-statistic.
    """
    # Promote scalar error terms to 1x1 matrices so det() applies.
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    numerator = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
    denominator = linalg.det(EF) / float(dfden)
    return numerator / denominator
#####################################
# SUPPORT FUNCTIONS #
#####################################
def ss(a, axis=0):
    """
    Squares each element of the input array, and returns the sum(s) of that.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    ss : ndarray
        The sum along the given axis for (a**2).

    See also
    --------
    square_of_sums : The square(s) of the sum(s) (the opposite of `ss`).

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([1., 2., 5.])
    >>> stats.ss(a)
    30.0

    And calculating along an axis:

    >>> b = np.array([[1., 2., 5.], [2., 5., 6.]])
    >>> stats.ss(b, axis=1)
    array([ 30., 65.])
    """
    a, axis = _chk_asarray(a, axis)
    # Elementwise square, then reduce along the requested axis.
    return np.sum(a * a, axis)
def square_of_sums(a, axis=0):
    """
    Sums elements of the input array, and returns the square(s) of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.

    See also
    --------
    ss : The sum of squares (the opposite of `square_of_sums`).

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.square_of_sums(a)
    array([ 1600.,  2025.,  2500.,  3025.])
    >>> stats.square_of_sums(a, axis=None)
    36100.0
    """
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    # Square via float multiplication so integer input cannot overflow
    # the result dtype; scalars come back as plain Python floats.
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
    """
    Sort an array and provide the argsort.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    fastsort : ndarray of type int
        sorted indices into the original array

    """
    # NOTE(review): despite the docstring, this returns BOTH the sorted
    # array and the argsort indices (in that order).
    order = np.argsort(a)
    return a[order], order
| 31.699851 | 95 | 0.597949 |
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import futil
from . import distributions
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
return a, b, outaxis
def find_repeats(arr):
v1, v2, n = futil.dfreps(arr)
return v1[:n], v2[:n]
s deprecated in scipy 0.15.0 "
"in favour of numpy.nanmean.")
def nanmean(x, axis=0):
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
factor = 1.0 - np.sum(mask, axis) / Norig
x[mask] = 0.0
return np.mean(x, axis) / factor
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
"in favour of numpy.nanstd.\nNote that numpy.nanstd "
"has a different signature.")
def nanstd(x, axis=0, bias=False):
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
Nnan = np.sum(mask, axis) * 1.0
n = Norig - Nnan
x[mask] = 0.0
m1 = np.sum(x, axis) / n
if axis:
d = x - np.expand_dims(m1, axis)
else:
d = x - m1
d *= d
m2 = np.sum(d, axis) - m1 * m1 * Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.0)
return np.sqrt(m2c)
def _nanmedian(arr1d):
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
enonan = x[-s.size:][~c[-s.size:]]
x[s[:enonan.size]] = enonan
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
"in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
if hasattr(np, 'nanmedian'):
return np.nanmedian(x, axis)
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
ims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def mask_to_limits(a, limits, inclusive):
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True)):
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = mask_to_limits(a.ravel(), limits, inclusive)
return am.mean()
def masked_var(am):
m = am.mean()
s = ma.add.reduce((am - m)**2)
n = am.count() - 1.0
return s / n
def tvar(a, limits=None, inclusive=(True, True)):
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = mask_to_limits(a, limits, inclusive)
return masked_var(am)
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
a, axis = _chk_asarray(a, axis)
am = mask_to_limits(a, (lowerlimit, None), (inclusive, False))
return ma.minimum.reduce(am, axis)
def tmax(a, upperlimit=None, axis=0, inclusive=True):
a, axis = _chk_asarray(a, axis)
am = mask_to_limits(a, (None, upperlimit), (False, inclusive))
return ma.maximum.reduce(am, axis)
def tstd(a, limits=None, inclusive=(True, True)):
return np.sqrt(tvar(a, limits, inclusive))
def tsem(a, limits=None, inclusive=(True, True)):
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=1) / np.sqrt(a.size)
am = mask_to_limits(a, limits, inclusive)
sd = np.sqrt(masked_var(am))
return sd / np.sqrt(am.count())
ract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item()
if fisher:
return vals - 3
else:
return vals
_DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1):
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis)
kurt = kurtosis(a, axis)
return _DescribeResult(n, mm, m, v, sk, kurt)
(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
return Z, 2 * distributions.norm.sf(np.abs(Z))
def normaltest(a, axis=0):
a, axis = _chk_asarray(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return k2, chisqprob(k2, 2)
def jarque_bera(x):
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
def histogram2(a, bins):
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
a = np.ravel(a)
if defaultlimits is None:
data_min = a.min()
data_max = a.max()
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
binsize = bin_edges[1] - bin_edges[0]
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return hist, defaultlimits[0], binsize, extrapoints
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return cumhist, l, b, e
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
h = np.array(h / float(np.array(a).shape[0]))
return h, l, b, e
np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
#####################################
# TRIMMING FUNCTIONS #
#####################################
def threshold(a, threshmin=None, threshmax=None, newval=0):
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
def sigmaclip(a, low=4., high=4.):
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return c, critlower, critupper
def trimboth(a, proportiontocut, axis=0):
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
sl = [slice(None)] * a.ndim
sl[axis] = slice(lowercut, uppercut)
return a[sl]
def trim1(a, proportiontocut, tail='right'):
a = asarray(a)
if tail.lower() == 'right':
lowercut = 0
uppercut = len(a) - int(proportiontocut * len(a))
elif tail.lower() == 'left':
lowercut = int(proportiontocut * len(a))
uppercut = len(a)
return a[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
a = np.asarray(a)
if axis is None:
nobs = a.size
else:
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut - 1
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
try:
atmp = np.partition(a, (lowercut, uppercut), axis)
except AttributeError:
atmp = np.sort(a, axis)
newa = trimboth(atmp, proportiontocut, axis=axis)
return np.mean(newa, axis=axis)
def f_oneway(*args):
args = [np.asarray(arg, dtype=float) for arg in args]
na = len(args) # ANOVA on 'na' groups, each in it's own array
alldata = np.concatenate(args)
bign = len(alldata)
sstot = ss(alldata) - (square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += square_of_sums(a) / float(len(a))
ssbn -= (square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = na - 1
dfwn = bign - na
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
def pearsonr(x, y):
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(ss(xm) * ss(ym))
r = r_num / r_den
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
def spearmanr(a, b=None, axis=0):
a, axisout = _chk_asarray(a, axis)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore')
try:
t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return rs[1,0], prob[1,0]
else:
return rs, prob
def pointbiserialr(x, y):
x = np.asarray(x, dtype=bool)
y = np.asarray(y, dtype=float)
n = len(x)
phat = x.sum() / float(len(x))
y0 = y[~x]
y1 = y[x]
y0m = y0.mean()
y1m = y1.mean()
rpb = (y1m - y0m) * np.sqrt(phat - phat**2) / y.std()
df = n - 2
TINY = 1e-20
t = rpb * np.sqrt(df / ((1.0 - rpb + TINY)*(1.0 + rpb + TINY)))
prob = betai(0.5*df, 0.5, df/(df+t*t))
return rpb, prob
def kendalltau(x, y, initial_lexsort=True):
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if not x.size or not y.size:
return (np.nan, np.nan)
n = np.int64(len(x))
temp = list(range(n))
def mergesort(offs, length):
exchcnt = 0
if length == 1:
return 0
if length == 2:
if y[perm[offs]] <= y[perm[offs+1]]:
return 0
t = perm[offs]
perm[offs] = perm[offs+1]
perm[offs+1] = t
return 1
length0 = length // 2
length1 = length - length0
middle = offs + length0
exchcnt += mergesort(offs, length0)
exchcnt += mergesort(middle, length1)
if y[perm[middle - 1]] < y[perm[middle]]:
return exchcnt
i = j = k = 0
while j < length0 or k < length1:
if k >= length1 or (j < length0 and y[perm[offs + j]] <=
y[perm[middle + k]]):
temp[i] = perm[offs + j]
d = i - j
j += 1
else:
temp[i] = perm[middle + k]
d = (offs + i) - (middle + k)
k += 1
if d > 0:
exchcnt += d
i += 1
perm[offs:offs+length] = temp[0:length]
return exchcnt
if initial_lexsort:
perm = np.lexsort((y, x))
else:
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
first = 0
t = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
t += ((i - first) * (i - first - 1)) // 2
first = i
t += ((n - first) * (n - first - 1)) // 2
first = 0
u = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]]:
u += ((i - first) * (i - first - 1)) // 2
first = i
u += ((n - first) * (n - first - 1)) // 2
exchanges = mergesort(0, n)
first = 0
v = 0
for i in xrange(1, n):
if y[perm[first]] != y[perm[i]]:
v += ((i - first) * (i - first - 1)) // 2
first = i
v += ((n - first) * (n - first - 1)) // 2
tot = (n * (n - 1)) // 2
if tot == u or tot == v:
return np.nan, np.nan
denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom
# stats.kendalltau() in SciPy
svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return tau, prob
def linregress(x, y=None):
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = asarray(x)
y = asarray(y)
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
return slope, intercept, r, prob, sterrest
def theilslopes(y, x=None, alpha=0.95):
y = np.asarray(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.asarray(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = find_repeats(x)
_, nyreps = find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0):
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return t, prob
def _ttest_finish(df, t):
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    """Shared core of the two-sample t-tests given a precomputed denominator."""
    t_stat = np.divide(mean1 - mean2, denom)
    return _ttest_finish(df, t_stat)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True):
    """Two-sample t-test from descriptive statistics (means, stds, sizes).

    With ``equal_var`` a pooled variance is used, otherwise Welch's test.
    Returns the t statistic and the two-sided p-value.
    """
    denom_func = (_equal_var_ttest_denom if equal_var
                  else _unequal_var_ttest_denom)
    df, denom = denom_func(std1 ** 2, nobs1, std2 ** 2, nobs2)
    return _ttest_ind_from_stats(mean1, mean2, denom, df)
def ttest_ind(a, b, axis=0, equal_var=True):
    """Two-sample t-test on the independent samples `a` and `b` along `axis`.

    With ``equal_var`` the classic pooled test is used, otherwise Welch's
    t-test.  Returns ``(nan, nan)`` when either sample is empty.
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    if a.size == 0 or b.size == 0:
        return (np.nan, np.nan)
    var_a = np.var(a, axis, ddof=1)
    var_b = np.var(b, axis, ddof=1)
    size_a = a.shape[axis]
    size_b = b.shape[axis]
    if equal_var:
        df, denom = _equal_var_ttest_denom(var_a, size_a, var_b, size_b)
    else:
        df, denom = _unequal_var_ttest_denom(var_a, size_a, var_b, size_b)
    return _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis),
                                 denom, df)
def ttest_rel(a, b, axis=0):
    """Paired (related samples) t-test on `a` and `b` along `axis`.

    Returns the t statistic and the two-sided p-value; ``(nan, nan)``
    when either input is empty.

    Raises
    ------
    ValueError
        If the inputs differ in length along `axis`.
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    if a.shape[axis] != b.shape[axis]:
        raise ValueError('unequal length arrays')
    if a.size == 0 or b.size == 0:
        return np.nan, np.nan
    n = a.shape[axis]
    df = float(n - 1)
    # A paired test is a one-sample t-test on the element-wise differences.
    d = (a - b).astype(np.float64)
    v = np.var(d, axis, ddof=1)
    dm = np.mean(d, axis)
    denom = np.sqrt(v / float(n))
    t = np.divide(dm, denom)
    t, prob = _ttest_finish(df, t)
    return t, prob
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
    """Kolmogorov-Smirnov goodness-of-fit test.

    Parameters
    ----------
    rvs : str, array_like or callable
        Sample data, a distribution name, or a callable producing samples.
    cdf : str or callable
        CDF to test against.  If `rvs` is a string, `cdf` must be falsy or
        name the same distribution.
    args : tuple, optional
        Distribution parameters forwarded to `rvs`/`cdf`.
    N : int, optional
        Sample size when `rvs` is a string or callable.
    alternative : {'two-sided', 'less', 'greater'}, optional
        ('two_sided' is accepted as an alias of 'two-sided'.)
    mode : {'approx', 'asymp'}, optional
        How the two-sided p-value is computed.

    Returns
    -------
    D : float
        KS statistic (D, D+ or D-, depending on `alternative`).
    p-value : float
    """
    if isinstance(rvs, string_types):
        if (not cdf) or (cdf == rvs):
            cdf = getattr(distributions, rvs).cdf
            rvs = getattr(distributions, rvs).rvs
        else:
            raise AttributeError("if rvs is string, cdf has to be the "
                                 "same distribution")
    if isinstance(cdf, string_types):
        cdf = getattr(distributions, cdf).cdf
    if callable(rvs):
        kwds = {'size': N}
        vals = np.sort(rvs(*args, **kwds))
    else:
        vals = np.sort(rvs)
        N = len(vals)
    cdfvals = cdf(vals, *args)
    # to not break compatibility with existing code
    if alternative == 'two_sided':
        alternative = 'two-sided'
    if alternative in ['two-sided', 'greater']:
        # D+: largest positive gap (empirical cdf above theoretical cdf).
        Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
        if alternative == 'greater':
            return Dplus, distributions.ksone.sf(Dplus, N)
    if alternative in ['two-sided', 'less']:
        # D-: largest negative gap (theoretical cdf above empirical cdf).
        Dmin = (cdfvals - np.arange(0.0, N)/N).max()
        if alternative == 'less':
            return Dmin, distributions.ksone.sf(Dmin, N)
    if alternative == 'two-sided':
        D = np.max([Dplus, Dmin])
        if mode == 'asymp':
            return D, distributions.kstwobign.sf(D * np.sqrt(N))
        if mode == 'approx':
            pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
            # For large N (or a clearly non-significant result) the asymptotic
            # value is accurate enough; otherwise approximate the two-sided
            # p-value as twice the one-sided probability.
            if N > 2666 or pval_two > 0.80 - N*0.3/1000:
                return D, distributions.kstwobign.sf(D * np.sqrt(N))
            else:
                return D, 2 * distributions.ksone.sf(D, N)
# Map from names to lambda_ values used in power_divergence().
# Each entry selects one member of the Cressie-Read power divergence family.
_power_div_lambda_names = {
    "pearson": 1,
    "log-likelihood": 0,
    "freeman-tukey": -0.5,
    "mod-log-likelihood": -1,
    "neyman": -2,
    "cressie-read": 2/3,
}
def _count(a, axis=None):
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """Cressie-Read power divergence statistic and goodness-of-fit test.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies.
    f_exp : array_like, optional
        Expected frequencies; defaults to the mean of `f_obs` (i.e. a
        uniform expectation).
    ddof : int, optional
        Delta degrees of freedom subtracted from ``k - 1``.
    axis : int or None, optional
        Axis along which the statistic is computed (None ravels).
    lambda_ : float or str, optional
        Family member: a key of `_power_div_lambda_names` or a number.
        Defaults to 1 (Pearson's chi-squared).

    Returns
    -------
    stat, p : float or ndarray
        The test statistic and its chi-squared p-value.
    """
    # Convert the input argument `lambda_` to a numerical value.
    if isinstance(lambda_, string_types):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
                             "are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        lambda_ = 1
    f_obs = np.asanyarray(f_obs)
    if f_exp is not None:
        f_exp = np.atleast_1d(np.asanyarray(f_exp))
    else:
        # Compute the equivalent of
        #   f_exp = f_obs.mean(axis=axis, keepdims=True)
        # Older versions of numpy do not have the 'keepdims' argument, so
        # we have to do a little work to achieve the same result.
        # Ignore 'invalid' errors so the edge case of a data set with length 0
        # is handled without spurious warnings.
        with np.errstate(invalid='ignore'):
            f_exp = np.atleast_1d(f_obs.mean(axis=axis))
        if axis is not None:
            reduced_shape = list(f_obs.shape)
            reduced_shape[axis] = 1
            f_exp.shape = reduced_shape
    # `terms` is the array of terms that are summed along `axis` to create
    # the test statistic.  We use some specialized code for a few special
    # cases of lambda_.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (G-test); xlogy handles 0*log(0) == 0.
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio.
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)
    stat = terms.sum(axis=axis)
    num_obs = _count(terms, axis=axis)
    ddof = asarray(ddof)
    p = chisqprob(stat, num_obs - 1 - ddof)
    return stat, p
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """Pearson chi-squared goodness-of-fit test.

    Thin wrapper over `power_divergence` with ``lambda_="pearson"``.
    """
    result = power_divergence(f_obs, f_exp=f_exp, ddof=ddof,
                              axis=axis, lambda_="pearson")
    return result
def ks_2samp(data1, data2):
    """Two-sample Kolmogorov-Smirnov test.

    Parameters
    ----------
    data1, data2 : array_like
        The two (unsorted) samples.

    Returns
    -------
    d : float
        Maximum absolute difference between the two empirical CDFs.
    prob : float
        Approximate two-sided p-value (1.0 when the asymptotic formula
        fails numerically).
    """
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    data_all = np.concatenate([data1, data2])
    # Empirical CDFs of both samples evaluated at every observed point.
    cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
    cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
    d = np.max(np.absolute(cdf1 - cdf2))
    # Note: d absolute not signed distance
    en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # Previously a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only computation failures should fall back here.
        prob = 1.0
    return d, prob
def mannwhitneyu(x, y, use_continuity=True):
    """Mann-Whitney U rank test on the independent samples `x` and `y`.

    Parameters
    ----------
    x, y : array_like
        The two samples.
    use_continuity : bool, optional
        Whether to apply the continuity correction (1/2); default True.

    Returns
    -------
    u : float
        The smaller of the two U statistics.
    prob : float
        One-sided p-value from the normal approximation.

    Raises
    ------
    ValueError
        If all observations are tied (the statistic is undefined).
    """
    x = asarray(x)
    y = asarray(y)
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(np.concatenate((x, y)))
    rankx = ranked[0:n1]  # ranks belonging to the first sample
    u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0)  # U for x
    u2 = n1*n2 - u1  # remainder is U for y
    bigu = max(u1, u2)
    smallu = min(u1, u2)
    T = tiecorrect(ranked)
    if T == 0:
        # Fixed message typo: previously read 'amannwhitneyu'.
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
    if use_continuity:
        # Normal approximation with continuity correction of 0.5.
        z = abs((bigu - 0.5 - n1*n2/2.0) / sd)
    else:
        z = abs((bigu - n1*n2/2.0) / sd)
    return smallu, distributions.norm.sf(z)
def ranksums(x, y):
    """Wilcoxon rank-sum test for two independent samples.

    Returns the z statistic (normal approximation) and the two-sided
    p-value.
    """
    x, y = map(np.asarray, (x, y))
    n1, n2 = len(x), len(y)
    ranked = rankdata(np.concatenate((x, y)))
    # Sum of the ranks that belong to the first sample.
    rank_sum = np.sum(ranked[:n1], axis=0)
    expected = n1 * (n1 + n2 + 1) / 2.0
    z = (rank_sum - expected) / np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
    return z, 2 * distributions.norm.sf(abs(z))
def kruskal(*args):
    """Kruskal-Wallis H-test for independent samples.

    Each positional argument is one group.  Returns the tie-corrected
    H statistic and its chi-squared p-value.
    """
    groups = list(map(np.asarray, args))
    num_groups = len(groups)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")
    sizes = np.asarray(list(map(len, groups)))
    ranked = rankdata(np.concatenate(groups))  # rank the pooled data
    ties = tiecorrect(ranked)                  # correction factor for ties
    if ties == 0:
        raise ValueError('All numbers are identical in kruskal')
    # Sum of (rank-sum)^2 / n over the groups; `offsets` marks where each
    # group's ranks start inside the pooled ranking.
    offsets = np.insert(np.cumsum(sizes), 0, 0)
    ssbn = 0
    for idx in range(num_groups):
        ssbn += square_of_sums(ranked[offsets[idx]:offsets[idx+1]]) / float(sizes[idx])
    total = np.sum(sizes)
    h = 12.0 / (total * (total + 1)) * ssbn - 3 * (total + 1)
    df = num_groups - 1
    h /= ties
    return h, chisqprob(h, df)
def friedmanchisquare(*args):
    """Friedman chi-squared test for repeated measurements.

    Each positional argument is one treatment; all must have the same
    length `n`, and at least 3 treatments are required.  Returns the
    tie-corrected chi-squared statistic and its p-value with ``k - 1``
    degrees of freedom.
    """
    k = len(args)
    if k < 3:
        raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
    n = len(args[0])
    for i in range(1, k):
        if len(args[i]) != n:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')
    # Rank data within each measurement row (subjects are rows after .T).
    data = np.vstack(args).T
    data = data.astype(float)
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    # Handle ties: accumulate t*(t^2 - 1) over every group of tied ranks.
    ties = 0
    for i in range(len(data)):
        replist, repnum = find_repeats(array(data[i]))
        for t in repnum:
            ties += t * (t*t - 1)
    # Tie-correction factor (equals 1 when there are no ties).
    c = 1 - ties / float(k*(k*k - 1)*n)
    ssbn = np.sum(data.sum(axis=0)**2)
    chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
    return chisq, chisqprob(chisq, k - 1)
def combine_pvalues(pvalues, method='fisher', weights=None):
    """Combine p-values from independent tests of the same hypothesis.

    Parameters
    ----------
    pvalues : array_like, 1-D
        The p-values to combine.
    method : {'fisher', 'stouffer'}, optional
        Fisher's chi-squared method (default) or Stouffer's Z-score method.
    weights : array_like, 1-D, optional
        Weights, only used by Stouffer's method.

    Returns
    -------
    statistic, pval : float
        The combined statistic (chi-squared or Z) and its p-value.

    Raises
    ------
    ValueError
        For non-1-D inputs, mismatched weights, or an unknown method.
    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")
    if method == 'fisher':
        Xsq = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
        return (Xsq, pval)
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")
        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")
        Zi = distributions.norm.isf(pvalues)
        Z = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(Z)
        return (Z, pval)
    else:
        # The message was previously passed to ValueError alongside `method`
        # (comma instead of %), so the placeholder was never substituted.
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
def chisqprob(chisq, df):
    """Probability of a chi-squared value at least as extreme as `chisq`.

    Equivalent to ``1 - chi2.cdf(chisq, df)``, computed via the
    complemented chi-squared distribution function.
    """
    prob = special.chdtrc(df, chisq)
    return prob
def betai(a, b, x):
    """Regularized incomplete beta function of `x`, with `x` capped at 1.

    Inputs greater than 1 are clamped to 1 so the result is a valid
    probability (``betainc(a, b, 1) == 1``).
    """
    clipped = np.where(np.asarray(x) < 1.0, x, 1.0)
    return special.betainc(a, b, clipped)
#####################################
# ANOVA CALCULATIONS #
#####################################
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """F approximation to Wilks' lambda from restricted/full error matrices.

    `ER` and `EF` are the error sums-of-squares (matrices or scalars) of
    the restricted and full models; `a` and `b` are the factor level
    counts.  `dfnum` and `dfden` are accepted for interface compatibility
    but are not used by the computation.
    """
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    lmbda = linalg.det(EF) / linalg.det(ER)
    # Rao's approximation degenerates to q = 1 in this special case.
    if (a - 1) ** 2 + (b - 1) ** 2 == 5:
        q = 1
    else:
        q = np.sqrt(((a - 1) ** 2 * (b - 1) ** 2 - 2)
                    / ((a - 1) ** 2 + (b - 1) ** 2 - 5))
    root = lmbda ** (1.0 / q)
    numerator = (1 - root) * (a - 1) * (b - 1)
    denominator = root / (numerator * q - 0.5 * (a - 1) * (b - 1) + 1)
    return numerator / denominator
def f_value(ER, EF, dfR, dfF):
    """F statistic comparing a restricted model (ER, dfR) to a full one.

    Computes ``((ER - EF) / (dfR - dfF)) / (EF / dfF)``.
    """
    mean_sq_diff = (ER - EF) / float(dfR - dfF)
    return mean_sq_diff / (EF / float(dfF))
def f_value_multivariate(ER, EF, dfnum, dfden):
    """F statistic from restricted/full error matrices via determinants.

    Scalar inputs are promoted to 1x1 matrices before taking determinants.
    """
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    numerator = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
    denominator = linalg.det(EF) / float(dfden)
    return numerator / denominator
#####################################
# SUPPORT FUNCTIONS #
#####################################
def ss(a, axis=0):
    """Sum of squares of `a` along `axis` (None ravels the input first)."""
    a, axis = _chk_asarray(a, axis)
    return np.sum(np.square(a), axis)
def square_of_sums(a, axis=0):
    """Square of the sum of `a` along `axis`, promoted to float."""
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
    """Sort `a` and also return the index array that sorts it.

    Deprecated; use ``np.argsort`` plus fancy indexing directly instead.
    """
    it = np.argsort(a)
    as_ = a[it]
    return as_, it
| true | true |
f724da7af2704b4ffec5878bcac55c4bb2e57d18 | 4,446 | py | Python | models/experimental/mnist_keras_ds/mnist.py | cs-gn/tpu | fadb409b8dae2385191050aa5c901d9084d8bb8c | [
"Apache-2.0"
] | 1 | 2020-08-27T18:52:09.000Z | 2020-08-27T18:52:09.000Z | models/experimental/mnist_keras_ds/mnist.py | omar16100/tpu | 4727594874e8587a60cb088627d46f73a1769823 | [
"Apache-2.0"
] | null | null | null | models/experimental/mnist_keras_ds/mnist.py | omar16100/tpu | 4727594874e8587a60cb088627d46f73a1769823 | [
"Apache-2.0"
] | 1 | 2019-03-25T07:50:04.000Z | 2019-03-25T07:50:04.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Experimental Keras MNIST Example.
To test on CPU:
python mnist.py --use_tpu=False [--fake_data=true]
To test on TPU:
python mnist.py --use_tpu=True [--tpu=$TPU_NAME]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
flags.DEFINE_bool('use_tpu', True, 'Use TPU model instead of CPU.')
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_string(
'model_dir', None,
('The directory where the model and training/evaluation summaries '
'are stored. If unset, no summaries will be stored.'))
flags.DEFINE_bool('fake_data', False, 'Use fake data to test functionality.')
# Batch size should satisfy two properties to be able to run in cloud:
# num_eval_samples % batch_size == 0
# batch_size % 8 == 0
BATCH_SIZE = 200
NUM_CLASSES = 10
EPOCHS = 15
# input image dimensions
IMG_ROWS, IMG_COLS = 28, 28
FLAGS = flags.FLAGS
def mnist_model(input_shape):
  """Builds the small convolutional network used for MNIST.

  Args:
    input_shape: Shape of a single input image, e.g. (28, 28, 1).

  Returns:
    An uncompiled `tf.keras.models.Sequential` model.
  """
  layers = tf.keras.layers
  return tf.keras.models.Sequential([
      layers.Conv2D(32, kernel_size=(3, 3), activation='relu',
                    input_shape=input_shape),
      layers.Conv2D(64, (3, 3), activation='relu'),
      layers.MaxPooling2D(pool_size=(2, 2)),
      layers.Dropout(0.25),
      layers.Flatten(),
      layers.Dense(128, activation='relu'),
      layers.Dropout(0.5),
      layers.Dense(NUM_CLASSES, activation='softmax'),
  ])
def run():
  """Run the model training and return evaluation output.

  Builds a TPUStrategy when --use_tpu is set, loads real or fake MNIST
  data per --fake_data, trains for EPOCHS epochs and returns the result
  of model.evaluate() on the test set.
  """
  use_tpu = FLAGS.use_tpu
  strategy = None
  if use_tpu:
    # Distribute training across the TPU named by --tpu.
    strategy = tf.contrib.distribute.TPUStrategy(
        tf.contrib.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu),
        steps_per_run=100)
  print('Mode:', 'TPU' if use_tpu else 'CPU')
  if FLAGS.fake_data:
    print('Using fake data')
    x_train = np.random.random((BATCH_SIZE, IMG_ROWS, IMG_COLS))
    y_train = np.zeros([BATCH_SIZE, 1], dtype=np.int32)
    x_test, y_test = x_train, y_train
  else:
    # the data, split between train and test sets
    print('Using real data')
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
  # Add the single channel dimension expected by Conv2D.
  x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1)
  x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1)
  input_shape = (IMG_ROWS, IMG_COLS, 1)
  # Normalize pixel values from [0, 255] to [0, 1].
  x_train = x_train.astype('float32')
  x_test = x_test.astype('float32')
  x_train /= 255
  x_test /= 255
  print('x_train shape:', x_train.shape)
  print(x_train.shape[0], 'train samples')
  print(x_test.shape[0], 'test samples')
  # convert class vectors to binary class matrices
  y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)
  y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)
  model = mnist_model(input_shape)
  model.compile(
      loss=tf.keras.losses.categorical_crossentropy,
      optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),
      metrics=['accuracy'],
      distribute=strategy)
  callbacks = []
  if FLAGS.model_dir:
    # Write TensorBoard summaries when a model dir is provided.
    callbacks = [tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)]
  model.fit(
      x_train,
      y_train,
      batch_size=BATCH_SIZE,
      callbacks=callbacks,
      epochs=EPOCHS,
      verbose=1,
      validation_data=(x_test, y_test))
  return model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=1)
def main(unused_dev):
  """Entry point used by absl.app: runs training and reports the results."""
  score = run()
  print('Loss for final step: {};'.format(score[0]))
  print('Accuracy: {};'.format(score[1]))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| 31.531915 | 80 | 0.703554 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
flags.DEFINE_bool('use_tpu', True, 'Use TPU model instead of CPU.')
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_string(
'model_dir', None,
('The directory where the model and training/evaluation summaries '
'are stored. If unset, no summaries will be stored.'))
flags.DEFINE_bool('fake_data', False, 'Use fake data to test functionality.')
BATCH_SIZE = 200
NUM_CLASSES = 10
EPOCHS = 15
IMG_ROWS, IMG_COLS = 28, 28
FLAGS = flags.FLAGS
def mnist_model(input_shape):
  """Builds the MNIST convolutional classifier (uncompiled)."""
  model = tf.keras.models.Sequential()
  model.add(
      tf.keras.layers.Conv2D(
          32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
  model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
  model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
  model.add(tf.keras.layers.Dropout(0.25))
  model.add(tf.keras.layers.Flatten())
  model.add(tf.keras.layers.Dense(128, activation='relu'))
  model.add(tf.keras.layers.Dropout(0.5))
  # Softmax output over the NUM_CLASSES digit classes.
  model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))
  return model
def run():
  """Trains the MNIST model and returns the test-set evaluation output."""
  use_tpu = FLAGS.use_tpu
  strategy = None
  if use_tpu:
    # Distribute training across the TPU named by --tpu.
    strategy = tf.contrib.distribute.TPUStrategy(
        tf.contrib.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu),
        steps_per_run=100)
  print('Mode:', 'TPU' if use_tpu else 'CPU')
  if FLAGS.fake_data:
    print('Using fake data')
    x_train = np.random.random((BATCH_SIZE, IMG_ROWS, IMG_COLS))
    y_train = np.zeros([BATCH_SIZE, 1], dtype=np.int32)
    x_test, y_test = x_train, y_train
  else:
    print('Using real data')
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
  # Add the single channel dimension expected by Conv2D.
  x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1)
  x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1)
  input_shape = (IMG_ROWS, IMG_COLS, 1)
  # Normalize pixel values from [0, 255] to [0, 1].
  x_train = x_train.astype('float32')
  x_test = x_test.astype('float32')
  x_train /= 255
  x_test /= 255
  print('x_train shape:', x_train.shape)
  print(x_train.shape[0], 'train samples')
  print(x_test.shape[0], 'test samples')
  # Convert class vectors to one-hot matrices.
  y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)
  y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)
  model = mnist_model(input_shape)
  model.compile(
      loss=tf.keras.losses.categorical_crossentropy,
      optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),
      metrics=['accuracy'],
      distribute=strategy)
  callbacks = []
  if FLAGS.model_dir:
    # Write TensorBoard summaries when a model dir is provided.
    callbacks = [tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)]
  model.fit(
      x_train,
      y_train,
      batch_size=BATCH_SIZE,
      callbacks=callbacks,
      epochs=EPOCHS,
      verbose=1,
      validation_data=(x_test, y_test))
  return model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=1)
def main(unused_dev):
  """Entry point used by absl.app: runs training and prints the metrics."""
  score = run()
  print('Loss for final step: %s;' % score[0])
  print('Accuracy: %s;' % score[1])
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| true | true |
f724dabdf285c5d14bf55e9bd7e21f067f7b0934 | 403 | py | Python | award_project/wsgi.py | Esther-Anyona/Developer-Awards | 64030da79cc1ed993b1bc4420725b2a996be84da | [
"MIT"
] | null | null | null | award_project/wsgi.py | Esther-Anyona/Developer-Awards | 64030da79cc1ed993b1bc4420725b2a996be84da | [
"MIT"
] | null | null | null | award_project/wsgi.py | Esther-Anyona/Developer-Awards | 64030da79cc1ed993b1bc4420725b2a996be84da | [
"MIT"
] | null | null | null | """
WSGI config for award_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'award_project.settings')
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'award_project.settings')
application = get_wsgi_application()
| true | true |
f724db24b2380e8d19e2fcdab914785ada4e9c4a | 854 | py | Python | buildlib/helpers/events.py | ForwardLine/backup-nanny | 67c687f43d732c60ab2e569e50bc40cc5e696b25 | [
"Apache-2.0"
] | 1 | 2019-11-13T04:15:41.000Z | 2019-11-13T04:15:41.000Z | buildlib/helpers/events.py | ForwardLine/backup-nanny | 67c687f43d732c60ab2e569e50bc40cc5e696b25 | [
"Apache-2.0"
] | null | null | null | buildlib/helpers/events.py | ForwardLine/backup-nanny | 67c687f43d732c60ab2e569e50bc40cc5e696b25 | [
"Apache-2.0"
] | 1 | 2019-10-25T21:24:20.000Z | 2019-10-25T21:24:20.000Z | import logging
from troposphere.events import Rule, Target
from buildlib.helpers.client_helper import ClientHelper
class EventsHelper(object):
    """Convenience wrapper for adding CloudWatch Events resources to a
    troposphere template."""

    def __init__(self, template, project, session=None):
        """Bind the helper to a template/project and resolve an events client."""
        self.client = ClientHelper.get_client('events', session)
        self.project = project
        self.template = template

    def create_cron_rule(self, schedule_expression, targets, state='ENABLED', name_prefix='', **kwargs):
        """Add a scheduled events Rule to the template and return the resource."""
        rule = Rule(
            '{0}Rule'.format(name_prefix),
            State=state,
            Targets=targets,
            ScheduleExpression=schedule_expression,
            **kwargs)
        return self.template.add_resource(rule)

    def create_target(self, arn, target_id, name_prefix=''):
        """Build (but do not register) an events Target for use in a Rule."""
        target = Target(
            '{0}Target'.format(name_prefix),
            Arn=arn,
            Id=target_id)
        return target
| 29.448276 | 104 | 0.637002 | import logging
from troposphere.events import Rule, Target
from buildlib.helpers.client_helper import ClientHelper
class EventsHelper(object):
    """Helper for creating CloudWatch Events resources in a troposphere
    template."""
    def __init__(self, template, project, session=None):
        # Boto3 events client resolved through the project's ClientHelper.
        self.client = ClientHelper.get_client('events', session)
        self.project = project
        self.template = template
    def create_cron_rule(self, schedule_expression, targets, state='ENABLED', name_prefix='', **kwargs):
        """Add a scheduled Rule to the template and return the resource."""
        return self.template.add_resource(Rule(
            '{0}Rule'.format(name_prefix),
            State=state,
            Targets=targets,
            ScheduleExpression=schedule_expression,
            **kwargs
        ))
    def create_target(self, arn, target_id, name_prefix=''):
        """Construct a Target object for use in a Rule's Targets list."""
        return Target(
            '{0}Target'.format(name_prefix),
            Arn=arn,
            Id=target_id
        )
f724db442a0f5748c892e969a3bc7eed6d4c5a14 | 16,050 | py | Python | pybvc/netconfdev/vrouter/interfaces.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | 1 | 2015-11-22T15:53:00.000Z | 2015-11-22T15:53:00.000Z | pybvc/netconfdev/vrouter/interfaces.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | null | null | null | pybvc/netconfdev/vrouter/interfaces.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2015
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES;LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
interfaces.py: Interface specific properties and access methods
"""
import json
from pybvc.common.utils import strip_none, remove_empty_from_dict, dict_keys_underscored_to_dashed
#-------------------------------------------------------------------------------
# Class 'DataPlaneInterface'
#-------------------------------------------------------------------------------
class DataPlaneInterface():
    """Configuration holder for a vRouter dataplane interface.

    Attributes mirror the Vyatta YANG model for dataplane interfaces.
    Unset values stay ``None`` (or empty lists), and "presence" leaves are
    encoded as an empty string when set.
    """

    def __init__(self, name):
        # Dataplane interface name (YANG list key).
        self.tagnode = name
        # Free-form description for the interface.
        self.description = None
        # DHCPv6 options (container) -- not yet modeled.
        self.dhcpv6_options = None
        # IPv4 parameters (container) -- not yet modeled.
        self.ip = None
        # IPv6 parameters (container) -- not yet modeled.
        self.ipv6 = None
        # Maximum Transmission Unit (MTU).
        self.mtu = None
        # Presence leaf: "" when the interface is disabled, None otherwise.
        self.disable = None
        # Virtual Interface (VIF) IDs.
        self.vif = []
        # Presence leaf: "" when sflow is enabled, None otherwise.
        self.sflow = None
        # Assigned IP addresses.
        self.address = []
        # Media Access Control (MAC) address.
        self.mac = None
        # Presence leaf: "" to ignore link state changes, None otherwise.
        self.disable_link_detect = None
        # Bridge group for this interface (container) -- not yet modeled.
        self.bridge_group = None

    def to_string(self):
        """Return the attribute dictionary rendered as a string."""
        return str(vars(self))

    def to_json(self):
        """Serialize this object to pretty-printed JSON."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def set_description(self, description):
        """Set the interface description."""
        self.description = description

    def set_dhcpv6_options(self, TBD):
        """Placeholder: DHCPv6 options are not implemented yet."""
        pass

    def set_ipv4_options(self, TBD):
        """Placeholder: IPv4 options are not implemented yet."""
        pass

    def set_ipv6_options(self, TBD):
        """Placeholder: IPv6 options are not implemented yet."""
        pass

    def set_mtu(self, mtu):
        """Set the Maximum Transmission Unit."""
        self.mtu = mtu

    def set_disable(self, value):
        """Disable (True) or enable (False) the interface."""
        self.disable = "" if value == True else None

    def set_vif(self, vif_id):
        """Append a Virtual Interface (VIF) ID."""
        self.vif.append(vif_id)

    def set_sflow(self, value):
        """Enable (True) or disable (False) sflow on the interface."""
        self.sflow = "" if value == True else None

    def set_address(self, address):
        """Append an IP address to the interface."""
        self.address.append(address)

    def set_mac(self, mac):
        """Set the MAC address."""
        self.mac = mac

    def set_disable_link_detect(self, value):
        """Ignore (True) or honor (False) link state changes."""
        self.disable_link_detect = "" if value == True else None

    def set_bridge_group(self, TBD):
        """Placeholder: bridge group configuration is not implemented yet."""
        pass
#-------------------------------------------------------------------------------
# Class 'OpenVpnInterface'
#-------------------------------------------------------------------------------
class OpenVpnInterface():
    """Configuration holder for a vRouter OpenVPN tunnel interface.

    Attributes mirror the Vyatta YANG model; `get_payload` renders the
    populated fields as the JSON body of an HTTP request.
    """
    _mn1 = "vyatta-interfaces:interfaces"
    _mn2 = "vyatta-interfaces-openvpn:openvpn"

    def __init__(self, name):
        # OpenVPN tunnel interface name (YANG list key).
        self.tagnode = name
        # Free-form description for the interface.
        self.description = None
        # OpenVPN authentication method (container).
        self.auth = None
        # Hashing algorithm: 'md5', 'sha1', 'sha256' or 'sha512'.
        self.hash = None
        # Presence leaf: interface disabled when set.
        self.disable = None
        # Server-mode options (container).
        self.server = None
        # OpenVPN interface device-type.
        self.device_type = None
        # File holding the secret key shared with the remote end.
        self.shared_secret_key_file = None
        # Data encryption algorithm: 'des', '3des', 'bf128', 'bf256',
        # 'aes128', 'aes192' or 'aes256'.
        self.encryption = None
        # Additional raw OpenVPN options.
        self.openvpn_option = []
        # Local IP address or network address.
        self.local_address = None
        # Local port (1..65535) to accept connections on.
        self.local_port = None
        # Local IP address to accept connections on (all if unset).
        self.local_host = None
        # IP address of the remote end of the tunnel.
        self.remote_address = None
        # Remote port number to connect to.
        self.remote_port = None
        # Remote hosts to connect to (dynamic if empty).
        self.remote_host = []
        # Transport Layer Security (TLS) options.
        self.tls = TlsOptions()
        # Mode of operation: 'site-to-site', 'client' or 'server'.
        self.mode = None
        # Presence container: use the tunnel as the default route.
        self.replace_default_route = None
        # Communication protocol: 'udp', 'tcp-passive' or 'tcp-active'.
        self.protocol = None
        # IPv4 parameters (container).
        self.ip = None
        # IPv6 parameters (container).
        self.ipv6 = None

    def to_string(self):
        """Return the attribute dictionary rendered as a string."""
        return str(vars(self))

    def to_json(self):
        """Serialize this object to pretty-printed JSON."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def get_payload(self):
        """Render this object as the JSON payload of an HTTP request.

        Unset fields are stripped and underscored attribute names are
        converted to the dashed YANG leaf names before wrapping the
        result in the module namespaces.
        """
        obj = json.loads(self.to_json())
        stripped = remove_empty_from_dict(strip_none(obj))
        dashed = dict_keys_underscored_to_dashed(stripped)
        payload = {self._mn1: {self._mn2: [dashed]}}
        return json.dumps(payload, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def set_description(self, description):
        """Set the interface description."""
        self.description = description

    def set_mode(self, mode):
        """Set the mode of operation ('site-to-site', 'client' or 'server')."""
        self.mode = mode

    def set_shared_secret_key_file(self, path):
        """Set the path of the pre-shared secret key file."""
        self.shared_secret_key_file = path

    def set_local_address(self, addr):
        """Set the local IP or network address."""
        self.local_address = addr

    def set_remote_address(self, addr):
        """Set the IP address of the remote end of the tunnel."""
        self.remote_address = addr

    def set_remote_host(self, addr):
        """Append a remote host to connect to."""
        self.remote_host.append(addr)

    def set_tls_role(self, role):
        """Set the TLS negotiation role ('active' or 'passive')."""
        self.tls.set_role(role)

    def set_tls_dh_file(self, path):
        """Set the Diffie-Hellman parameters file (server only)."""
        self.tls.set_dh_file(path)

    def set_tls_ca_cert_file(self, path):
        """Set the Certificate Authority certificate file."""
        self.tls.set_ca_cert_file(path)

    def set_tls_cert_file(self, path):
        """Set this host's certificate file."""
        self.tls.set_cert_file(path)

    def set_tls_crl_file(self, path):
        """Set this host's certificate revocation list file."""
        self.tls.set_crl_file(path)

    def set_tls_key_file(self, path):
        """Set this host's private key file."""
        self.tls.set_key_file(path)
#-------------------------------------------------------------------------------
# Class 'TlsOptions'
#-------------------------------------------------------------------------------
class TlsOptions():
''' Transport Layer Security (TLS) options
Helper class of the 'OpenVpnInterface' class '''
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self):
''' Role in TLS negotiation
enumeration: 'active', 'passive' '''
self.role = None
''' File containing Diffie Hellman parameters (server only) '''
self.dh_file = None
''' File containing certificate for Certificate Authority (CA) '''
self.ca_cert_file = None
''' File containing certificate for this host '''
self.cert_file = None
''' File containing certificate revocation list (CRL) for this host '''
self.crl_file = None
''' File containing this host's private key '''
self.key_file = None
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def set_role(self, role):
self.role = role
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def set_dh_file(self, path):
self.dh_file = path
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def set_ca_cert_file(self, path):
self.ca_cert_file = path
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def set_cert_file(self, path):
self.cert_file = path
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def set_crl_file(self, path):
self.crl_file = path
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def set_key_file(self, path):
self.key_file = path
#-------------------------------------------------------------------------------
# Class 'VirtualTunnelInterface'
#-------------------------------------------------------------------------------
class VirtualTunnelInterface():
''' Class representing a Virtual tunnel interface (VTI) '''
def __init__(self, name):
''' Virtual tunnel interface name '''
self.tagnode = name
''' Description for the interface '''
self.description = None
''' Maximum Transmission Unit (MTU), range 68..9000 '''
self.mtu = None
''' Disable this interface '''
self.disable = None
''' IPv4 or IPv6 Prefixes'''
self.address = []
''' IPv4 parameters '''
self.ip = None
''' IPv6 parameters '''
self.ipv6 = None
def to_string(self):
""" Return this object as a string """
return str(vars(self))
def to_json(self):
""" Return this object as JSON """
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def set_description(self, description):
self.description = description
def set_mtu(self, mtu):
self.mtu = mtu
def set_disable(self, value):
if (value == True):
self.disable = ""
else:
self.disable = None
def set_address(self, address):
self.address.append(address)
| 35.666667 | 98 | 0.406417 |
import json
from pybvc.common.utils import strip_none, remove_empty_from_dict, dict_keys_underscored_to_dashed
class DataPlaneInterface():
def __init__(self, name):
self.tagnode = name
self.description = None
self.dhcpv6_options = None
self.ip = None
self.ipv6 = None
self.mtu = None
self.disable = None
self.vif = []
self.sflow = None
self.address = []
self.mac = None
self.disable_link_detect = None
self.bridge_group = None
def to_string(self):
return str(vars(self))
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def set_description(self, description):
self.description = description
def set_dhcpv6_options(self, TBD):
pass
def set_ipv4_options(self, TBD):
pass
def set_ipv6_options(self, TBD):
pass
def set_mtu(self, mtu):
self.mtu = mtu
def set_disable(self, value):
if (value == True):
self.disable = ""
else:
self.disable = None
def set_vif(self, vif_id):
self.vif.append(vif_id)
def set_sflow(self, value):
if (value == True):
self.sflow = ""
else:
self.sflow = None
def set_address(self, address):
self.address.append(address)
def set_mac(self, mac):
self.mac = mac
def set_disable_link_detect(self, value):
if (value == True):
self.disable_link_detect = ""
else:
self.disable_link_detect = None
def set_bridge_group(self, TBD):
pass
class OpenVpnInterface():
_mn1 = "vyatta-interfaces:interfaces"
_mn2 = "vyatta-interfaces-openvpn:openvpn"
def __init__(self, name):
self.tagnode = name
self.description = None
self.auth = None
self.hash = None
self.disable = None
self.server = None
self.device_type = None
self.shared_secret_key_file = None
self.encryption = None
self.openvpn_option = []
self.local_address = None
self.local_port = None
self.local_host = None
self.remote_address = None
self.remote_port = None
self.remote_host = []
self.tls = TlsOptions()
self.mode = None
self.replace_default_route = None
self.protocol = None
self.ip = None
self.ipv6 = None
def to_string(self):
return str(vars(self))
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def get_payload(self):
s = self.to_json()
obj = json.loads(s)
obj1 = strip_none(obj)
obj2 = remove_empty_from_dict(obj1)
obj3 = dict_keys_underscored_to_dashed(obj2)
payload = {self._mn1: {self._mn2:[obj3]}}
return json.dumps(payload, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def set_description(self, description):
self.description = description
def set_mode(self, mode):
self.mode = mode
def set_shared_secret_key_file(self, path):
self.shared_secret_key_file = path
def set_local_address(self, addr):
self.local_address = addr
def set_remote_address(self, addr):
self.remote_address = addr
def set_remote_host(self, addr):
self.remote_host.append(addr)
def set_tls_role(self, role):
self.tls.set_role(role)
def set_tls_dh_file(self, path):
self.tls.set_dh_file(path)
def set_tls_ca_cert_file(self, path):
self.tls.set_ca_cert_file(path)
def set_tls_cert_file(self, path):
self.tls.set_cert_file(path)
def set_tls_crl_file(self, path):
self.tls.set_crl_file(path)
def set_tls_key_file(self, path):
self.tls.set_key_file(path)
class TlsOptions():
def __init__(self):
self.role = None
self.dh_file = None
self.ca_cert_file = None
self.cert_file = None
self.crl_file = None
self.key_file = None
def set_role(self, role):
self.role = role
def set_dh_file(self, path):
self.dh_file = path
def set_ca_cert_file(self, path):
self.ca_cert_file = path
def set_cert_file(self, path):
self.cert_file = path
def set_crl_file(self, path):
self.crl_file = path
def set_key_file(self, path):
self.key_file = path
class VirtualTunnelInterface():
def __init__(self, name):
self.tagnode = name
self.description = None
self.mtu = None
self.disable = None
self.address = []
self.ip = None
self.ipv6 = None
def to_string(self):
return str(vars(self))
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def set_description(self, description):
self.description = description
def set_mtu(self, mtu):
self.mtu = mtu
def set_disable(self, value):
if (value == True):
self.disable = ""
else:
self.disable = None
def set_address(self, address):
self.address.append(address)
| true | true |
f724dbc632ab957d93fb0b05c7dd5db1e521ac4b | 1,048 | py | Python | RosViewer.py | MikeHallettUK/RosRobotics | 953486cfd042d6adec1edaf425243eac0f473571 | [
"CC0-1.0"
] | null | null | null | RosViewer.py | MikeHallettUK/RosRobotics | 953486cfd042d6adec1edaf425243eac0f473571 | [
"CC0-1.0"
] | null | null | null | RosViewer.py | MikeHallettUK/RosRobotics | 953486cfd042d6adec1edaf425243eac0f473571 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# RosViewer.py = node that listens to a ROS image message topic,
# and displays the image using OpenCV.
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_viewer: # "/camera/color/image_raw" or "/camera/color/video"
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.ros_cb, queue_size=1, buff_size=2 ** 24)
def ros_cb(self,msg):
cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8") # in msg.data as "rgb8" but is "bgr8" from RS camera ??
cv2.imshow("Ros video", cv_image)
key = cv2.waitKey(10) # in milliseconds
if key == 113: # 113 is the letter 'q'
cv2.destroyAllWindows()
rospy.signal_shutdown("Quitting")
print("Starting Ros video image_viewer v1.2 ; press q to quit in video-window.")
rospy.init_node('image_viewer', anonymous=True)
iv = image_viewer()
rospy.spin()
print("Finished")
| 36.137931 | 117 | 0.676527 |
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_viewer:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.ros_cb, queue_size=1, buff_size=2 ** 24)
def ros_cb(self,msg):
cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
cv2.imshow("Ros video", cv_image)
key = cv2.waitKey(10)
if key == 113:
cv2.destroyAllWindows()
rospy.signal_shutdown("Quitting")
print("Starting Ros video image_viewer v1.2 ; press q to quit in video-window.")
rospy.init_node('image_viewer', anonymous=True)
iv = image_viewer()
rospy.spin()
print("Finished")
| true | true |
f724dd1a39a7f175e46aa6568a64a8dd26d6775b | 251 | py | Python | temboo/core/Library/OneLogin/Roles/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/OneLogin/Roles/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/OneLogin/Roles/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | from temboo.Library.OneLogin.Roles.ListAll import ListAll, ListAllInputSet, ListAllResultSet, ListAllChoreographyExecution
from temboo.Library.OneLogin.Roles.ShowRole import ShowRole, ShowRoleInputSet, ShowRoleResultSet, ShowRoleChoreographyExecution
| 83.666667 | 127 | 0.888446 | from temboo.Library.OneLogin.Roles.ListAll import ListAll, ListAllInputSet, ListAllResultSet, ListAllChoreographyExecution
from temboo.Library.OneLogin.Roles.ShowRole import ShowRole, ShowRoleInputSet, ShowRoleResultSet, ShowRoleChoreographyExecution
| true | true |
f724dd6c5a854504a4b01aac06593f75753a45b0 | 4,911 | py | Python | ttt.py | JotaGo/tic-tac-toe | 237288f84bf388c219f6b5cf6cbae6334bfddb26 | [
"MIT"
] | null | null | null | ttt.py | JotaGo/tic-tac-toe | 237288f84bf388c219f6b5cf6cbae6334bfddb26 | [
"MIT"
] | null | null | null | ttt.py | JotaGo/tic-tac-toe | 237288f84bf388c219f6b5cf6cbae6334bfddb26 | [
"MIT"
] | null | null | null | import random
#GLOBAL VARIABLE
ttt = [[1,2,3],[4,5,6],[7,8,9]]
#PRINTING THE BOARD FUNCTION
def printing():
print()
for i , j in enumerate(ttt):
if i > 0:
print('---------')
print(j[0],'|',j[1],'|',j[2])
print()
#RESET THE BOARD
## WITH THIS FUNCTION THE USER CAN RESET BOARD TO PLAY AGAIN
## THIS FUNCTION WORKS FILLING THE LIST IN ORDER FROM ONE TO NINE
def reset_board():
nav1 , nav2 , cnt = 0 , 0 , 1
while nav1 < 3:
while nav2 < 3:
if ttt[nav1][nav2] != cnt:
ttt[nav1][nav2] = cnt
cnt += 1
nav2 +=1
nav2 = 0
nav1 +=1
def reset_game():
print()
while True:
user_o = input('Do you want to play again? (Y/n)\n')
if user_o.lower() == 'y':
reset_board()
return True
elif user_o.lower() == 'n':
return False
else:
print()
print('please enter a valid option')
#WINNING METHODS
##THIS FUNCTION WILL DETECT IF ARE A MATCH OF THREE X OR O IN A ROW
def winning_row():
for i in ttt:
cnt = 0
aux = i[0]
for j in i:
if aux == j:
cnt += 1
if cnt == 3 and aux == 'x':
return 'you win'
elif cnt == 3 and aux == 'o':
return 'you lose'
return False
##THIS FUNCTION WILL DETECT IF ARE A MATCH OF THREE X OR O IN A COLUMN
def winning_column():
nav1 , nav2 , cnt = 0 , 0 , 0
while nav2 < 3:
while nav1 < 2:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2]:
nav1 += 1
cnt += 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
nav1 = 0
break
nav2 += 1
return False
##THIS FUNCTION WILL DETECT IF ARE A MATCH OF THREE X OR O IN A DIAGONAL
def winning_diagonal():
nav1,nav2,cnt = 0,0,0
while nav1 < 2 and nav2 < 2:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2 + 1]:
cnt += 1
nav1 += 1
nav2 += 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
cnt = 0
nav1 = 0
nav2 = len(ttt[nav1]) - 1
break
while True:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2 - 1]:
cnt += 1
nav1 += 1
nav2 -= 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
break
return False
###THIS FUNCTION IS TO AVOID REPEATING THE SAME CONSULT IN ALL OF THE WINNING METHODS
def win_declaretion(nav1,nav2):
if ttt[nav1][nav2] == 'x':
return 'you win'
elif ttt[nav1][nav2] == 'o':
return 'you lose'
#USER OPTION
def selection(opt):
nav1 , nav2 = 0 , 0
while nav1 < 3:
while nav2 < 3:
if opt == ttt[nav1][nav2]:
ttt[nav1][nav2] = 'x'
find = True
return find
else:
find = False
nav2 += 1
nav2 = 0
nav1 += 1
return find
#THIS FUNCTION WILL SELECT RANDOMLY A OPTION FOR THE CPU
##WITHOUT THE METHODS OF WINNING IN THE MAIN FUNCTION THE GAME WILL CRASH
##BECAUSE AT THE END IT WILL ENTER IN A INFINITE LOOP LOOKING FOR A AVAILABLE SPOT
def cpu_option():
while True:
nav1 , nav2 = 0 , 0
cpu_opt = random.randint(1,9)
while nav1 < 3:
while nav2 < 3:
if cpu_opt == ttt[nav1][nav2]:
ttt[nav1][nav2] = 'o'
find = True
return find
nav2 += 1
nav2 = 0
nav1 += 1
def end_game(final):
if final == 'you win':
print('congratulations you win!')
return True
elif final == 'you lose':
print('how sad, you lose :(')
return True
if __name__ == "__main__":
on = True
flag = False
while on:
printing()
option = int(input('Select a spot of the board: '))
while not selection(option):
print('that spot is occupied')
printing()
option = int(input('Select a spot of the board: '))
if not flag:
flag = winning_row()
if not flag:
flag = winning_column()
if not flag:
flag = winning_diagonal()
if flag:
printing()
end_game(flag)
on = reset_game()
if on:
flag = False
cpu_option()
if not flag:
flag = winning_row()
if not flag:
flag = winning_column()
if not flag:
flag = winning_diagonal()
if flag:
printing()
end_game(flag)
on = reset_game()
if on:
flag = False | 27.283333 | 85 | 0.476074 | import random
ttt = [[1,2,3],[4,5,6],[7,8,9]]
def printing():
print()
for i , j in enumerate(ttt):
if i > 0:
print('---------')
print(j[0],'|',j[1],'|',j[2])
print()
nav2] != cnt:
ttt[nav1][nav2] = cnt
cnt += 1
nav2 +=1
nav2 = 0
nav1 +=1
def reset_game():
print()
while True:
user_o = input('Do you want to play again? (Y/n)\n')
if user_o.lower() == 'y':
reset_board()
return True
elif user_o.lower() == 'n':
return False
else:
print()
print('please enter a valid option')
= i[0]
for j in i:
if aux == j:
cnt += 1
if cnt == 3 and aux == 'x':
return 'you win'
elif cnt == 3 and aux == 'o':
return 'you lose'
return False
v2 < 3:
while nav1 < 2:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2]:
nav1 += 1
cnt += 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
nav1 = 0
break
nav2 += 1
return False
nd nav2 < 2:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2 + 1]:
cnt += 1
nav1 += 1
nav2 += 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
cnt = 0
nav1 = 0
nav2 = len(ttt[nav1]) - 1
break
while True:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2 - 1]:
cnt += 1
nav1 += 1
nav2 -= 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
break
return False
):
nav1 , nav2 = 0 , 0
while nav1 < 3:
while nav2 < 3:
if opt == ttt[nav1][nav2]:
ttt[nav1][nav2] = 'x'
find = True
return find
else:
find = False
nav2 += 1
nav2 = 0
nav1 += 1
return find
if cpu_opt == ttt[nav1][nav2]:
ttt[nav1][nav2] = 'o'
find = True
return find
nav2 += 1
nav2 = 0
nav1 += 1
def end_game(final):
if final == 'you win':
print('congratulations you win!')
return True
elif final == 'you lose':
print('how sad, you lose :(')
return True
if __name__ == "__main__":
on = True
flag = False
while on:
printing()
option = int(input('Select a spot of the board: '))
while not selection(option):
print('that spot is occupied')
printing()
option = int(input('Select a spot of the board: '))
if not flag:
flag = winning_row()
if not flag:
flag = winning_column()
if not flag:
flag = winning_diagonal()
if flag:
printing()
end_game(flag)
on = reset_game()
if on:
flag = False
cpu_option()
if not flag:
flag = winning_row()
if not flag:
flag = winning_column()
if not flag:
flag = winning_diagonal()
if flag:
printing()
end_game(flag)
on = reset_game()
if on:
flag = False | true | true |
f724dd876dd86bd7229b96394df79995ae66159a | 2,035 | py | Python | test/TestShellWithoutPipefail.py | chilicheech/ansible-lint | 57d4d3346179bb4142aeb7218dbf5f91befcab72 | [
"MIT"
] | null | null | null | test/TestShellWithoutPipefail.py | chilicheech/ansible-lint | 57d4d3346179bb4142aeb7218dbf5f91befcab72 | [
"MIT"
] | null | null | null | test/TestShellWithoutPipefail.py | chilicheech/ansible-lint | 57d4d3346179bb4142aeb7218dbf5f91befcab72 | [
"MIT"
] | null | null | null | # pylint: disable=preferred-module # FIXME: remove once migrated per GH-725
import unittest
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.ShellWithoutPipefail import ShellWithoutPipefail
from ansiblelint.testing import RunFromText
FAIL_TASKS = '''
---
- hosts: localhost
become: no
tasks:
- name: pipeline without pipefail
shell: false | cat
- name: pipeline with or and pipe, no pipefail
shell: false || true | cat
- shell: |
df | grep '/dev'
'''
SUCCESS_TASKS = '''
---
- hosts: localhost
become: no
tasks:
- name: pipeline with pipefail
shell: set -o pipefail && false | cat
- name: pipeline with pipefail, multi-line
shell: |
set -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -e -x -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -e -x -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -eo pipefail
false | cat
- name: pipeline without pipefail, ignoring errors
shell: false | cat
ignore_errors: true
- name: non-pipeline without pipefail
shell: "true"
- name: command without pipefail
command: "true"
- name: shell with or
shell:
false || true
- shell: |
set -o pipefail
df | grep '/dev'
- name: should not fail due to ignore_errors being true
shell: false | cat
ignore_errors: true
'''
class TestShellWithoutPipeFail(unittest.TestCase):
collection = RulesCollection()
collection.register(ShellWithoutPipefail())
def setUp(self):
self.runner = RunFromText(self.collection)
def test_fail(self):
results = self.runner.run_playbook(FAIL_TASKS)
self.assertEqual(3, len(results))
def test_success(self):
results = self.runner.run_playbook(SUCCESS_TASKS)
self.assertEqual(0, len(results))
| 22.865169 | 76 | 0.633907 | import RulesCollection
from ansiblelint.rules.ShellWithoutPipefail import ShellWithoutPipefail
from ansiblelint.testing import RunFromText
FAIL_TASKS = '''
---
- hosts: localhost
become: no
tasks:
- name: pipeline without pipefail
shell: false | cat
- name: pipeline with or and pipe, no pipefail
shell: false || true | cat
- shell: |
df | grep '/dev'
'''
SUCCESS_TASKS = '''
---
- hosts: localhost
become: no
tasks:
- name: pipeline with pipefail
shell: set -o pipefail && false | cat
- name: pipeline with pipefail, multi-line
shell: |
set -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -e -x -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -e -x -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -eo pipefail
false | cat
- name: pipeline without pipefail, ignoring errors
shell: false | cat
ignore_errors: true
- name: non-pipeline without pipefail
shell: "true"
- name: command without pipefail
command: "true"
- name: shell with or
shell:
false || true
- shell: |
set -o pipefail
df | grep '/dev'
- name: should not fail due to ignore_errors being true
shell: false | cat
ignore_errors: true
'''
class TestShellWithoutPipeFail(unittest.TestCase):
collection = RulesCollection()
collection.register(ShellWithoutPipefail())
def setUp(self):
self.runner = RunFromText(self.collection)
def test_fail(self):
results = self.runner.run_playbook(FAIL_TASKS)
self.assertEqual(3, len(results))
def test_success(self):
results = self.runner.run_playbook(SUCCESS_TASKS)
self.assertEqual(0, len(results))
| true | true |
f724de43aa9b83eb0afc55ae9f946720ab6db30a | 38,330 | py | Python | tests/system/robot/chromeTests.py | krzysz00/nvda | d34444242a529098499131165a3e60d5a05ac96f | [
"bzip2-1.0.6"
] | 1,592 | 2015-11-10T12:05:44.000Z | 2022-03-31T11:50:40.000Z | tests/system/robot/chromeTests.py | krzysz00/nvda | d34444242a529098499131165a3e60d5a05ac96f | [
"bzip2-1.0.6"
] | 9,479 | 2015-11-10T20:56:48.000Z | 2022-03-31T23:51:30.000Z | tests/system/robot/chromeTests.py | TheQuinbox/nvda | 9c7b763a2428b43802758a3859de8708cefcd4a0 | [
"bzip2-1.0.6"
] | 682 | 2015-11-10T11:19:23.000Z | 2022-03-31T07:51:29.000Z | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2020-2021 NV Access Limited, Leonard de Ruijter
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""Logic for NVDA + Google Chrome tests
"""
import os
from robot.libraries.BuiltIn import BuiltIn
# imported methods start with underscore (_) so they don't get imported into robot files as keywords
from SystemTestSpy import (
_getLib,
)
# Imported for type information
from ChromeLib import ChromeLib as _ChromeLib
from AssertsLib import AssertsLib as _AssertsLib
import NvdaLib as _NvdaLib
_builtIn: BuiltIn = BuiltIn()
_chrome: _ChromeLib = _getLib("ChromeLib")
_asserts: _AssertsLib = _getLib("AssertsLib")
#: Double space is used to separate semantics in speech output this typically
# adds a slight pause to the synthesizer.
SPEECH_SEP = " "
SPEECH_CALL_SEP = '\n'
#: single space is used to separate semantics in braille output.
BRAILLE_SEP = " "
ARIAExamplesDir = os.path.join(
_NvdaLib._locations.repoRoot, "include", "w3c-aria-practices", "examples"
)
def checkbox_labelled_by_inner_element():
_chrome.prepareChrome(
r"""
<div tabindex="0" role="checkbox" aria-labelledby="inner-label">
<div style="display:inline" id="inner-label">
Simulate evil cat
</div>
</div>
"""
)
actualSpeech = _chrome.getSpeechAfterTab()
_asserts.strings_match(
actualSpeech,
# The name for the element is also in it's content, the name is spoken twice:
# "Simulate evil cat Simulate evil cat check box not checked"
# Instead this should be spoken as:
"Simulate evil cat check box not checked"
)
def test_mark_aria_details():
_chrome.prepareChrome(
"""
<div>
<p>The word <mark aria-details="cat-details">cat</mark> has a comment tied to it.</p>
<div id="cat-details" role="comment">
Cats go woof BTW<br>—Jonathon Commentor
<div role="comment">
No they don't<br>—Zara
</div>
<div role="form">
<textarea cols="80" placeholder="Add reply..."></textarea>
<input type="submit">
</div>
</div>
</div>
"""
)
actualSpeech = _chrome.getSpeechAfterKey('downArrow')
_asserts.strings_match(
actualSpeech,
"The word highlighted has details cat out of highlighted has a comment tied to it."
)
# this word has no details attached
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"word"
)
# check that there is no summary reported
actualSpeech = _chrome.getSpeechAfterKey("NVDA+\\")
_asserts.strings_match(
actualSpeech,
"No additional details"
)
# this word has details attached to it
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"highlighted has details cat out of highlighted"
)
# read the details summary
actualSpeech = _chrome.getSpeechAfterKey("NVDA+\\")
_asserts.strings_match(
actualSpeech,
"Cats go woof BTW Jonathon Commentor No they don't Zara Submit"
)
def announce_list_item_when_moving_by_word_or_character():
_chrome.prepareChrome(
r"""
<div contenteditable="true">
<p>Before list</p>
<ul style="list-style-type:none">
<li>small cat</li>
<li>big dog</li>
</ul>
</div>
"""
)
# Force focus mode
actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
_asserts.strings_match(
actualSpeech,
"Focus mode"
)
# Tab into the contenteditable
actualSpeech = _chrome.getSpeechAfterKey("tab")
_asserts.strings_match(
actualSpeech,
"section multi line editable Before list"
)
# Ensure that moving into a list by line, "list item" is not reported.
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"list small cat"
)
# Ensure that when moving by word (control+rightArrow)
# within the list item, "list item" is not announced.
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"cat"
)
# Ensure that when moving by character (rightArrow)
# within the list item, "list item" is not announced.
actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
_asserts.strings_match(
actualSpeech,
"a"
)
# move to the end of the line (and therefore the list item)
actualSpeech = _chrome.getSpeechAfterKey("end")
_asserts.strings_match(
actualSpeech,
"blank"
)
# Ensure that when moving by character (rightArrow)
# onto the next list item, "list item" is reported.
actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
_asserts.strings_match(
actualSpeech,
SPEECH_CALL_SEP.join([
"list item level 1",
"b"
])
)
# Ensure that when moving by character (leftArrow)
# onto the previous list item, "list item" is reported.
# Note this places us on the end-of-line insertion point of the previous list item.
actualSpeech = _chrome.getSpeechAfterKey("leftArrow")
_asserts.strings_match(
actualSpeech,
"list item level 1"
)
# Ensure that when moving by word (control+rightArrow)
# onto the next list item, "list item" is reported.
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"list item level 1 big"
)
# Ensure that when moving by word (control+leftArrow)
# onto the previous list item, "list item" is reported.
# Note this places us on the end-of-line insertion point of the previous list item.
actualSpeech = _chrome.getSpeechAfterKey("control+leftArrow")
_asserts.strings_match(
actualSpeech,
"list item level 1"
)
def test_i7562():
""" List should not be announced on every line of a ul in a contenteditable """
_chrome.prepareChrome(
r"""
<div contenteditable="true">
<p>before</p>
<ul>
<li>frogs</li>
<li>birds</li>
</ul>
<p>after</p>
</div>
"""
)
# Force focus mode
actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
_asserts.strings_match(
actualSpeech,
"Focus mode"
)
# Tab into the contenteditable
actualSpeech = _chrome.getSpeechAfterKey("tab")
_asserts.strings_match(
actualSpeech,
"section multi line editable before"
)
# DownArow into the list. 'list' should be announced when entering.
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"list bullet frogs"
)
# DownArrow to the second list item. 'list' should not be announced.
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"bullet birds"
)
# DownArrow out of the list. 'out of list' should be announced.
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"out of list after",
)
def test_pr11606():
"""
Announce the correct line when placed at the end of a link at the end of a list item in a contenteditable
"""
_chrome.prepareChrome(
r"""
<div contenteditable="true">
<ul>
<li><a href="#">A</a> <a href="#">B</a></li>
<li>C D</li>
</ul>
</div>
"""
)
# Force focus mode
actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
_asserts.strings_match(
actualSpeech,
"Focus mode"
)
# Tab into the contenteditable
actualSpeech = _chrome.getSpeechAfterKey("tab")
_asserts.strings_match(
actualSpeech,
"section multi line editable list bullet link A link B"
)
# move past the end of the first link.
# This should not be affected due to pr #11606.
actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
_asserts.strings_match(
actualSpeech,
SPEECH_CALL_SEP.join([
"out of link",
"space"
])
)
# Move to the end of the line (which is also the end of the second link)
# Before pr #11606 this would have announced the bullet on the next line.
actualSpeech = _chrome.getSpeechAfterKey("end")
_asserts.strings_match(
actualSpeech,
"link"
)
# Read the current line.
# Before pr #11606 the next line ("C D") would have been read.
actualSpeech = _chrome.getSpeechAfterKey("NVDA+upArrow")
_asserts.strings_match(
actualSpeech,
"bullet link A link B"
)
def test_ariaTreeGrid_browseMode():
"""
Ensure that ARIA treegrids are accessible as a standard table in browse mode.
"""
testFile = os.path.join(ARIAExamplesDir, "treegrid", "treegrid-1.html")
_chrome.prepareChrome(
f"""
<iframe src="{testFile}"></iframe>
"""
)
# Jump to the first heading in the iframe.
actualSpeech = _chrome.getSpeechAfterKey("h")
_asserts.strings_match(
actualSpeech,
"frame main landmark Treegrid Email Inbox Example heading level 1"
)
# Tab to the first link.
# This ensures that focus is totally within the iframe
# so as to not cause focus to hit the iframe's document
# when entering focus mode on the treegrid later.
actualSpeech = _chrome.getSpeechAfterKey("tab")
_asserts.strings_match(
actualSpeech,
"issue 790. link"
)
# Jump to the ARIA treegrid with the next table quicknav command.
# The browse mode caret will be inside the table on the caption before the first row.
actualSpeech = _chrome.getSpeechAfterKey("t")
_asserts.strings_match(
actualSpeech,
"Inbox table clickable with 5 rows and 3 columns Inbox"
)
# Move past the caption onto row 1 with downArrow
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"row 1 column 1 Subject"
)
# Navigate to row 2 column 1 with NVDA table navigation command
actualSpeech = _chrome.getSpeechAfterKey("control+alt+downArrow")
_asserts.strings_match(
actualSpeech,
"expanded level 1 row 2 Treegrids are awesome"
)
# Press enter to activate NVDA focus mode and focus the current row
actualSpeech = _chrome.getSpeechAfterKey("enter")
_asserts.strings_match(
actualSpeech,
SPEECH_CALL_SEP.join([
# focus mode turns on
"Focus mode",
# Focus enters the ARIA treegrid (table)
"Inbox table",
# Focus lands on row 2
"level 1 Treegrids are awesome Want to learn how to use them? aaron at thegoogle dot rocks expanded",
])
)
def ARIAInvalid_spellingAndGrammar():
"""
Tests ARIA invalid values of "spelling", "grammar" and "spelling, grammar".
Please note that although IAccessible2 allows multiple values for invalid,
multiple values to aria-invalid is not yet standard.
And even if it were, they would be separated by space, not comma
thus the html for this test would need to change,
but the expected output shouldn't need to.
"""
_chrome.prepareChrome(
r"""
<p>Big <span aria-invalid="spelling">caat</span> meos</p>
<p>Small <span aria-invalid="grammar">a dog</span> woofs</p>
<p>Fat <span aria-invalid="grammar, spelling">a ffrog</span> crokes</p>
"""
)
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"Big spelling error caat meos"
)
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"Small grammar error a dog woofs"
)
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"Fat spelling error grammar error a ffrog crokes"
)
def test_ariaCheckbox_browseMode():
	"""
	Navigate to an unchecked checkbox in reading mode.
	"""
	testFile = os.path.join(ARIAExamplesDir, "checkbox", "checkbox-1", "checkbox-1.html")
	_chrome.prepareChrome(
		f"""
		<iframe src="{testFile}"></iframe>
		"""
	)
	# "h" quick-nav jumps to the first heading inside the iframe.
	spoken = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(
		spoken,
		"frame main landmark Checkbox Example (Two State) heading level 1"
	)
	# "x" quick-nav jumps to the checkbox.
	spoken = _chrome.getSpeechAfterKey("x")
	_asserts.strings_match(
		spoken,
		"Sandwich Condiments grouping list with 4 items Lettuce check box not checked"
	)
def test_i12147():
	"""
	New focus target should be announced if the triggering element is removed when activated.
	"""
	# A plain string literal is used here: the original was an f-string with no
	# placeholders, which needlessly forced the script's braces to be escaped
	# as '{{' / '}}'. Runtime content is unchanged.
	_chrome.prepareChrome(
		"""
		<div>
			<button id='trigger0'>trigger 0</button>
			<h4 id='target0' tabindex='-1'>target 0</h4>
		</div>
		<script>
			let trigger0 = document.querySelector('#trigger0');
			trigger0.addEventListener('click', e => {
				let focusTarget = document.querySelector('#target0');
				trigger0.remove();
				focusTarget.focus();
			})
		</script>
		"""
	)
	# Jump to the first button (the trigger)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"trigger 0 button"
	)
	# Activate the button, we should hear the new focus target.
	actualSpeech = _chrome.getSpeechAfterKey("enter")
	_asserts.strings_match(
		actualSpeech,
		"target 0 heading level 4"
	)
def test_tableInStyleDisplayTable():
	"""
	Chrome treats nodes with `style="display: table"` as tables.
	When a HTML style table is positioned in such a node, NVDA was previously unable to announce
	table row and column count as well as provide table navigation for the inner table.
	"""
	_chrome.prepareChrome(
		"""
		<p>Paragraph</p>
		<div style="display:table">
			<table>
				<thead>
					<tr>
						<th>First heading</th>
						<th>Second heading</th>
					</tr>
				</thead>
				<tbody>
					<tr>
						<td>First content cell</td>
						<td>Second content cell</td>
					</tr>
				</tbody>
			</table>
		</div>
		"""
	)
	# Quick-nav ("t") must land on the inner table, with correct dimensions.
	tableSpeech = _chrome.getSpeechAfterKey("t")
	_asserts.strings_match(
		tableSpeech,
		"table with 2 rows and 2 columns row 1 column 1 First heading"
	)
	# Table navigation must also work inside the inner table.
	cellSpeech = _chrome.getSpeechAfterKey("control+alt+downArrow")
	_asserts.strings_match(cellSpeech, "row 2 First content cell")
def test_ariaRoleDescription_focus():
	"""
	NVDA should report the custom role of an object on focus.
	"""
	_chrome.prepareChrome(
		"""
		<button aria-roledescription="pizza">Cheese</button><br />
		<button aria-roledescription="pizza">Meat</button>
		"""
	)
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(spoken, "Cheese pizza")
	# Switching to focus mode must not change how the custom role is reported.
	spoken = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(spoken, "Focus mode")
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(spoken, "Meat pizza")
def test_ariaRoleDescription_inline_browseMode():
	"""
	NVDA should report the custom role for inline elements in browse mode.
	"""
	_chrome.prepareChrome(
		"""
		<p>Start
		<img aria-roledescription="drawing" alt="Our logo" src="https://www.nvaccess.org/images/logo.png" />
		End</p>
		"""
	)
	# Reading the entire line: entering the custom role is reported,
	# but exiting is not.
	spoken = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(spoken, "Start drawing Our logo End")
	# Reading by word: both entering and exiting the custom role are reported.
	for expected in (
		"drawing Our",
		"logo out of drawing",
		"End",
	):
		spoken = _chrome.getSpeechAfterKey("control+rightArrow")
		_asserts.strings_match(spoken, expected)
def test_ariaRoleDescription_block_browseMode():
	"""
	NVDA should report the custom role at start and end for block elements in browse mode.
	"""
	_chrome.prepareChrome(
		"""
		<aside aria-roledescription="warning">
		<p>Wet paint!</p>
		<p>Please be careful.</p>
		</aside>
		<p>End</p>
		"""
	)
	# Reading line by line: both entering and exiting the custom role
	# are reported.
	for expected in (
		"warning Wet paint!",
		"Please be careful.",
		"out of warning End",
	):
		spoken = _chrome.getSpeechAfterKey("downArrow")
		_asserts.strings_match(spoken, expected)
def test_ariaRoleDescription_inline_contentEditable():
	"""
	NVDA should report the custom role for inline elements in content editables.
	"""
	_chrome.prepareChrome(
		"""
		<div contenteditable="true">
		<p>Top line</p>
		<p>Start
		<img aria-roledescription="drawing" alt="Our logo" src="https://www.nvaccess.org/images/logo.png" />
		End</p>
		</div>
		"""
	)
	# Force focus mode before caret navigation.
	spoken = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(spoken, "Focus mode")
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(spoken, "section multi line editable Top line")
	# Reading the entire line: entering the custom role is reported,
	# but exiting is not.
	spoken = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(spoken, "Start drawing Our logo End")
	# Reading by word: both entering and exiting the custom role are reported.
	for expected in (
		"drawing Our logo out of drawing",
		"End",
	):
		spoken = _chrome.getSpeechAfterKey("control+rightArrow")
		_asserts.strings_match(spoken, expected)
def test_ariaRoleDescription_block_contentEditable():
	"""
	NVDA should report the custom role at start and end for block elements in content editables.
	"""
	_chrome.prepareChrome(
		"""
		<div contenteditable="true">
		<p>Top line</p>
		<aside aria-roledescription="warning">
		<p>Wet paint!</p>
		<p>Please be careful.</p>
		</aside>
		<p>End</p>
		</div>
		"""
	)
	# Force focus mode before caret navigation.
	spoken = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(spoken, "Focus mode")
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(spoken, "section multi line editable Top line")
	# Reading line by line: both entering and exiting the custom role
	# are reported.
	for expected in (
		"warning Wet paint!",
		"Please be careful.",
		"out of warning End",
	):
		spoken = _chrome.getSpeechAfterKey("downArrow")
		_asserts.strings_match(spoken, expected)
def _getAriaDescriptionSample() -> str:
annotation = "User nearby, Aaron"
linkDescription = "opens in a new tab"
# link title should be read in focus
linkTitle = "conduct a search"
linkContents = "to google's"
return f"""
<div>
<div
contenteditable=""
spellcheck="false"
role="textbox"
aria-multiline="true"
><p>This is a line with no annotation</p>
<p><span
aria-description="{annotation}"
>Here is a sentence that is being edited by someone else.</span>
<b>Multiple can edit this.</b></p>
<p>An element with a role, follow <a
href="www.google.com"
aria-description="{linkDescription}"
>{linkContents}</a
> website</p>
<p>Testing the title attribute, <a
href="www.google.com"
title="{linkTitle}"
>{linkContents}</a
> website</p>
</div>
</div>
"""
def test_ariaDescription_focusMode():
	""" Ensure aria description is read in focus mode.
	Settings which may affect this:
	- speech.reportObjectDescriptions default:True
	- annotations.reportAriaDescription default:True
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	# Tabbing into the contenteditable switches to focus mode automatically.
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		spoken,
		"edit multi line This is a line with no annotation\nFocus mode"
	)
	# NOTE: reporting aria-description needs Chrome canary 92.0.4479.0+
	# (description-from); it hasn't reached Chrome stable yet.
	spoken = _chrome.getSpeechAfterKey('downArrow')
	annotatedLine = SPEECH_SEP.join([
		"User nearby, Aaron",  # annotation
		"Here is a sentence that is being edited by someone else.",  # span text
		"Multiple can edit this.",  # bold paragraph text
	])
	_asserts.strings_match(spoken, annotatedLine)
	spoken = _chrome.getSpeechAfterKey('downArrow')
	describedLink = SPEECH_SEP.join([  # two space separator
		"An element with a role, follow",  # paragraph text
		"link",  # link role
		"opens in a new tab",  # link description
		"to google's",  # link contents (name)
		"website"  # paragraph text
	])
	_asserts.strings_match(spoken, describedLink)
	# The link 'title' ("conduct a search") must NOT be announced: titles are
	# too often verbose and written without screen reader users in mind.
	spoken = _chrome.getSpeechAfterKey('downArrow')
	titledLink = SPEECH_SEP.join([
		"Testing the title attribute,",  # paragraph text
		"link",  # link role
		"to google's",  # link contents (name)
		"website"  # paragraph text
	])
	_asserts.strings_match(spoken, titledLink)
def test_ariaDescription_browseMode():
	""" Ensure aria description is read in browse mode.
	Settings which may affect this:
	- speech.reportObjectDescriptions default:True
	- annotations.reportAriaDescription default:True
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	spoken = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		spoken,
		"edit multi line This is a line with no annotation"
	)
	# NOTE: reporting aria-description needs Chrome canary 92.0.4479.0+
	# (description-from); it hasn't reached Chrome stable yet.
	spoken = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(spoken, SPEECH_SEP.join([
		"User nearby, Aaron",  # annotation
		"Here is a sentence that is being edited by someone else.",  # span text
		"Multiple can edit this.",  # bold paragraph text
	]))
	spoken = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(spoken, SPEECH_SEP.join([  # two space separator
		"An element with a role, follow",  # paragraph text
		"link",  # link role
		"opens in a new tab",  # link description
		"to google's",  # link contents (name)
		"website"  # paragraph text
	]))
	# The link 'title' ("conduct a search") must NOT be announced: titles are
	# too often verbose and written without screen reader users in mind.
	spoken = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(spoken, SPEECH_SEP.join([
		"Testing the title attribute,",  # paragraph text
		"link",  # link role
		"to google's",  # link contents (name)
		"website"  # paragraph text
	]))
def test_ariaDescription_sayAll():
	""" Ensure aria description is read by say all.
	Historically, description was not announced at all in browse mode with
	arrow navigation; annotations are now a special case.
	Settings which may affect this:
	- speech.reportObjectDescriptions default:True
	- annotations.reportAriaDescription default:True
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	spoken = _chrome.getSpeechAfterKey("NVDA+downArrow")
	# Reporting aria-description only supported in:
	# - Chrome 92.0.4479.0+
	expected = SPEECH_CALL_SEP.join([
		"Test page load complete",
		"edit multi line This is a line with no annotation",
		SPEECH_SEP.join([
			"User nearby, Aaron",  # annotation
			"Here is a sentence that is being edited by someone else.",  # span text
			"Multiple can edit this.",  # bold paragraph text
		]),
		SPEECH_SEP.join([  # two space separator
			"An element with a role, follow",  # paragraph text
			"link",  # link role
			"opens in a new tab",  # link description
			"to google's",  # link contents (name)
			"website",  # paragraph text
		]),
		# The link 'title' ("conduct a search") must NOT be announced: titles
		# are too often verbose and written without screen reader users in mind.
		SPEECH_SEP.join([
			"Testing the title attribute,",  # paragraph text
			"link",  # link role
			# note description missing when sourced from title attribute
			"to google's",  # link contents (name)
			"website",  # paragraph text
			"out of edit"
		]),
		"After Test Case Marker"
	])
	_asserts.strings_match(spoken, expected)
def test_i10840():
	"""
	The name of table header cells should only be conveyed once when navigating directly to them in browse mode
	Chrome self-references a header cell as its own header, which used to cause the name to be announced twice
	"""
	# Plain string literal: the original was an f-string with no placeholders.
	_chrome.prepareChrome(
		"""
		<table>
			<thead>
				<tr>
					<th>Month</th>
					<th>items</th>
				</tr>
			</thead>
			<tbody>
				<tr>
					<td>January</td>
					<td>100</td>
				</tr>
				<tr>
					<td>February</td>
					<td>80</td>
				</tr>
			</tbody>
			<tfoot>
				<tr>
					<td>Sum</td>
					<td>180</td>
				</tr>
			</tfoot>
		</table>
		"""
	)
	# Jump to the table
	actualSpeech = _chrome.getSpeechAfterKey("t")
	_asserts.strings_match(
		actualSpeech,
		"table with 4 rows and 2 columns row 1 column 1 Month"
	)
	# Move to the "items" header cell; its name must be spoken only once.
	nextActualSpeech = _chrome.getSpeechAfterKey("control+alt+rightArrow")
	_asserts.strings_match(
		nextActualSpeech,
		"column 2 items"
	)
def test_mark_browse():
	"""Reading a <mark> element in browse mode, by line and then by word."""
	_chrome.prepareChrome(
		"""
		<div>
		<p>The word <mark>Kangaroo</mark> is important.</p>
		</div>
		"""
	)
	spoken = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(
		spoken,
		"The word highlighted Kangaroo out of highlighted is important."
	)
	# Move by word (numpad6): first an ordinary word, then the marked word,
	# which must announce entering and exiting the highlight.
	spoken = _chrome.getSpeechAfterKey("numpad6")
	_asserts.strings_match(spoken, "word")
	spoken = _chrome.getSpeechAfterKey("numpad6")
	_asserts.strings_match(spoken, "highlighted Kangaroo out of highlighted")
def test_mark_focus():
	"""Tabbing to a link inside a <mark> element while in focus mode."""
	_chrome.prepareChrome(
		"""
		<div>
		<p>The word <mark><a href="#">Kangaroo</a></mark> is important.</p>
		</div>
		"""
	)
	# Force focus mode
	spoken = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(spoken, "Focus mode")
	# The highlight must be announced before the focused link.
	spoken = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(spoken, "highlighted\nKangaroo link")
def test_preventDuplicateSpeechFromDescription_browse_tab():
	"""
	When description matches name/content, it should not be spoken.
	This prevents duplicate speech.
	Settings which may affect this:
	- speech.reportObjectDescriptions default:True
	"""
	spy = _NvdaLib.getSpyLib()
	reportObjDescKey = ["presentation", "reportObjectDescriptions"]
	spy.set_configValue(reportObjDescKey, True)
	_chrome.prepareChrome(
		"""
		<a href="#" title="apple" style="display:block">apple</a>
		<a href="#" title="banana" aria-label="banana" style="display:block">contents</a>
		"""
	)
	# Tab through both links in browse mode; the description (which duplicates
	# the name) must not be repeated.
	spoken = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(spoken, "apple link")
	spoken = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(spoken, "banana link")
def preventDuplicateSpeechFromDescription_focus():
	"""
	When description matches name/content, it should not be spoken.
	This prevents duplicate speech.
	Settings which may affect this:
	- speech.reportObjectDescriptions default:True
	"""
	spy = _NvdaLib.getSpyLib()
	reportObjDescKey = ["presentation", "reportObjectDescriptions"]
	spy.set_configValue(reportObjDescKey, True)
	_chrome.prepareChrome(
		"""
		<a href="#" title="apple" style="display:block">apple</a>
		<a href="#" title="banana" aria-label="banana" style="display:block">contents</a>
		"""
	)
	# Force focus mode
	spoken = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(spoken, "Focus mode")
	# Tab through both links; the description (which duplicates the name)
	# must not be repeated.
	spoken = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(spoken, "apple link")
	spoken = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(spoken, "banana link")
def test_ensureNoBrowseModeDescription():
	"""
	Test that option (speech.reportObjectDescriptions default:True)
	does not result in description in browse mode.
	"""
	reportObjDescKey = ["presentation", "reportObjectDescriptions"]
	spy = _NvdaLib.getSpyLib()
	# prevent browse / focus mode messages from interfering, 0 means don't show.
	spy.set_configValue(["braille", "messageTimeout"], 0)
	_chrome.prepareChrome(
		"\n".join([
			r'<button>something for focus</button>'
			r'<a href="#" style="display:block" title="Cat">Apple</a>',
			# second link to make testing second focus mode tab easier
			r'<a href="#" style="display:block" title="Fish">Banana</a>',
		])
	)
	spoken = _NvdaLib.getSpeechAfterKey('tab')
	_builtIn.should_contain(spoken, "something for focus")
	# Browse mode: the link description (from title) must never be reported,
	# regardless of the reportObjectDescriptions setting.
	expectedSpeech = SPEECH_SEP.join([
		"link",  # role description
		# No link description (from title)
		"Apple",  # link name / contents
	])
	expectedBraille = BRAILLE_SEP.join([
		"lnk",  # role description
		# No link description (from title)
		"Apple",  # link name / contents
	])
	for settingValue in (True, False):
		spy.set_configValue(reportObjDescKey, settingValue)
		spoken, brailled = _NvdaLib.getSpeechAndBrailleAfterKey('downArrow')
		message = f"Test browse mode with reportObjectDescriptions={settingValue}"
		_asserts.speech_matches(spoken, expectedSpeech, message=message)
		_asserts.braille_matches(brailled, expectedBraille, message=message)
		# move virtual cursor back up to reset to start position
		spoken = _NvdaLib.getSpeechAfterKey('upArrow')
		_builtIn.should_contain(spoken, "something for focus")
	spy.set_configValue(reportObjDescKey, True)
	# Focus mode: here the description (from title) IS reported when enabled.
	spoken = _NvdaLib.getSpeechAfterKey("nvda+space")
	_asserts.speech_matches(spoken, "Focus mode")
	spoken, brailled = _NvdaLib.getSpeechAndBrailleAfterKey("tab")
	_asserts.speech_matches(
		spoken,
		SPEECH_SEP.join([
			"Apple",  # link name / contents
			"link",  # role description
			"Cat",  # link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=True"
	)
	_asserts.braille_matches(
		brailled,
		BRAILLE_SEP.join([
			"Apple",  # link name / contents
			"lnk",  # role description
			"Cat",  # link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=True"
	)
	# Use second link to test focus mode when 'reportObjectDescriptions' is off.
	spy.set_configValue(reportObjDescKey, False)
	spoken, brailled = _NvdaLib.getSpeechAndBrailleAfterKey("tab")
	_asserts.speech_matches(
		spoken,
		SPEECH_SEP.join([
			"Banana",  # link name / contents
			"link",  # role description
			# No link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=False"
	)
	_asserts.braille_matches(
		brailled,
		BRAILLE_SEP.join([
			"Banana",  # link name / contents
			"lnk",  # role description
			# No link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=False"
	)
def test_quickNavTargetReporting():
	"""
	When using quickNav, the target object should be spoken first, inner context should be given before outer
	context.
	"""
	spy = _NvdaLib.getSpyLib()
	reportArticlesKey = ["documentFormatting", "reportArticles"]
	spy.set_configValue(reportArticlesKey, False)
	_chrome.prepareChrome(
		"""
		<div
			aria-describedby="descId"
			aria-labelledby="labelId"
			role="article"
		>
			<h1>Quick Nav Target</h1>
			<div id="labelId">
				<div>Some name.</div>
			</div>
			<div id="descId">
				<span>A bunch of text.</span>
			</div>
		</div>
		"""
	)
	# Quick nav to heading
	spoken = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(spoken, SPEECH_SEP.join([
		"Quick Nav Target",  # Heading content (quick nav target), should read first
		"heading",  # Heading role
		"level 1",  # Heading level
	]))
	# Reset to allow trying again with report articles enabled
	spoken = _chrome.getSpeechAfterKey("control+home")
	_asserts.strings_match(spoken, SPEECH_SEP.join([
		"Before Test Case Marker",
	]))
	# Quick nav to heading with report articles enabled
	spy.set_configValue(reportArticlesKey, True)
	spoken = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(spoken, SPEECH_SEP.join([
		"Quick Nav Target",  # Heading content (quick nav target), should read first
		"heading",  # Heading role
		"level 1",  # Heading level
		"article",  # article role, enabled via report article
		"A bunch of text.",  # article (ancestor) description
	]))
def test_focusTargetReporting():
	"""
	When moving focus the target object should be spoken first, inner context should be given before outer
	context.
	"""
	spy = _NvdaLib.getSpyLib()
	reportArticlesKey = ["documentFormatting", "reportArticles"]
	spy.set_configValue(reportArticlesKey, False)
	_chrome.prepareChrome(
		"""
		<a href="#">before Target</a>
		<div
			aria-describedby="descId"
			aria-labelledby="labelId"
			role="article"
		>
			<a href="#">Focus Target</a>
			<div id="labelId">
				<div>Some name.</div>
			</div>
			<div id="descId">
				<span>A bunch of text.</span>
			</div>
		</div>
		"""
	)
	# Speech expected whenever focus returns to the link before the article.
	beforeTargetSpeech = SPEECH_SEP.join([
		"before Target",
		"link",
	])
	# Set focus
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(spoken, beforeTargetSpeech)
	# Focus the link
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		spoken,
		SPEECH_SEP.join([
			"Focus Target",  # link content (focus target), should read first
			"link",  # link role
		]),
		message="browse mode - focus with Report Articles disabled"
	)
	# Reset to allow trying again with report articles enabled
	spoken = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(spoken, beforeTargetSpeech)
	# Focus the link with report articles enabled
	spy.set_configValue(reportArticlesKey, True)
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		spoken,
		SPEECH_SEP.join([
			"Focus Target",  # link content (focus target), should read first
			"link",  # link role
			"article",  # article role, enabled via report article
			"A bunch of text.",  # article (ancestor) description
		]),
		message="browse mode - focus with Report Articles enabled"
	)
	# Reset to allow trying again in focus mode
	spoken = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(spoken, beforeTargetSpeech)
	# Force focus mode
	spoken = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(spoken, "Focus mode")
	# In focus mode the article container is announced, then the link itself.
	articleThenLinkSpeech = SPEECH_CALL_SEP.join([
		SPEECH_SEP.join([
			"Some name.",  # name for article
			"article",  # article role
			"A bunch of text.",  # description for article
		]),
		SPEECH_SEP.join([
			"Focus Target",  # link content (focus target), should read first
			"link",  # link role
		]),
	])
	spy.set_configValue(reportArticlesKey, False)
	# Focus the link
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		spoken,
		articleThenLinkSpeech,
		message="focus mode - focus with Report Articles disabled"
	)
	# Reset to allow trying again with report articles enabled
	spoken = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(spoken, beforeTargetSpeech)
	# Focus the link with report articles enabled
	spy.set_configValue(reportArticlesKey, True)
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		spoken,
		articleThenLinkSpeech,
		message="focus mode - focus with Report Articles enabled"
	)
| 28.776276 | 109 | 0.688469 |
import os
from robot.libraries.BuiltIn import BuiltIn
from SystemTestSpy import (
_getLib,
)
# Imported for type information
from ChromeLib import ChromeLib as _ChromeLib
from AssertsLib import AssertsLib as _AssertsLib
import NvdaLib as _NvdaLib
_builtIn: BuiltIn = BuiltIn()
_chrome: _ChromeLib = _getLib("ChromeLib")
_asserts: _AssertsLib = _getLib("AssertsLib")
#: Double space is used to separate semantics in speech output this typically
# adds a slight pause to the synthesizer.
SPEECH_SEP = " "
SPEECH_CALL_SEP = '\n'
#: single space is used to separate semantics in braille output.
BRAILLE_SEP = " "
ARIAExamplesDir = os.path.join(
_NvdaLib._locations.repoRoot, "include", "w3c-aria-practices", "examples"
)
def checkbox_labelled_by_inner_element():
	"""
	A custom checkbox whose accessible name comes, via aria-labelledby, from an
	element inside the checkbox itself; the label must be spoken once as the name.
	NOTE(review): lacks the "test_" prefix — presumably not auto-collected as a
	test; confirm this is intentional.
	"""
	_chrome.prepareChrome(
		r"""
		<div tabindex="0" role="checkbox" aria-labelledby="inner-label">
			<div style="display:inline" id="inner-label">
				Simulate evil cat
			</div>
		</div>
		"""
	)
	actualSpeech = _chrome.getSpeechAfterTab()
	_asserts.strings_match(
		actualSpeech,
		# The name for the element is also in its content; guard against the
		# name being spoken twice:
		"Simulate evil cat check box not checked"
	)
def test_mark_aria_details():
	"""
	A <mark> element carrying aria-details that points at a comment thread:
	entering/exiting the highlight and "has details" must be reported, and the
	report-details command (NVDA+backslash) must read the details summary only
	where details exist.
	"""
	_chrome.prepareChrome(
		"""
		<div>
			<p>The word <mark aria-details="cat-details">cat</mark> has a comment tied to it.</p>
			<div id="cat-details" role="comment">
				Cats go woof BTW<br>—Jonathon Commentor
				<div role="comment">
					No they don't<br>—Zara
				</div>
				<div role="form">
					<textarea cols="80" placeholder="Add reply..."></textarea>
					<input type="submit">
				</div>
			</div>
		</div>
		"""
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(
		actualSpeech,
		"The word highlighted has details cat out of highlighted has a comment tied to it."
	)
	# this word has no details attached
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"word"
	)
	# check that there is no summary reported
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+\\")
	_asserts.strings_match(
		actualSpeech,
		"No additional details"
	)
	# this word has details attached to it
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"highlighted has details cat out of highlighted"
	)
	# read the details summary
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+\\")
	_asserts.strings_match(
		actualSpeech,
		"Cats go woof BTW Jonathon Commentor No they don't Zara Submit"
	)
def announce_list_item_when_moving_by_word_or_character():
	"""
	In a contenteditable list, "list item level 1" should be announced when the
	caret crosses onto a different list item by character or by word, but not
	while moving within the same item.
	NOTE(review): lacks the "test_" prefix — presumably not auto-collected as a
	test; confirm this is intentional.
	"""
	_chrome.prepareChrome(
		r"""
		<div contenteditable="true">
			<p>Before list</p>
			<ul style="list-style-type:none">
				<li>small cat</li>
				<li>big dog</li>
			</ul>
		</div>
		"""
	)
	# Force focus mode so the caret keys navigate the contenteditable.
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"section multi line editable Before list"
	)
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"list small cat"
	)
	# Movement within the first item: no list item announcement.
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"cat"
	)
	actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"a"
	)
	actualSpeech = _chrome.getSpeechAfterKey("end")
	_asserts.strings_match(
		actualSpeech,
		"blank"
	)
	# Crossing onto the second item by character announces the list item.
	actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			"list item level 1",
			"b"
		])
	)
	actualSpeech = _chrome.getSpeechAfterKey("leftArrow")
	_asserts.strings_match(
		actualSpeech,
		"list item level 1"
	)
	# Crossing by word also announces the list item.
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"list item level 1 big"
	)
	actualSpeech = _chrome.getSpeechAfterKey("control+leftArrow")
	_asserts.strings_match(
		actualSpeech,
		"list item level 1"
	)
def test_i7562():
	"""
	Moving by line through a contenteditable list: entering the list announces
	"list", each item announces its bullet, and leaving announces "out of list"
	(regression test; presumably for issue #7562 — confirm against the tracker).
	"""
	_chrome.prepareChrome(
		r"""
		<div contenteditable="true">
			<p>before</p>
			<ul>
				<li>frogs</li>
				<li>birds</li>
			</ul>
			<p>after</p>
		</div>
		"""
	)
	# Force focus mode so the caret keys navigate the contenteditable.
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"section multi line editable before"
	)
	# Entering the list: "list" announced once, then the bullet and item text.
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"list bullet frogs"
	)
	# Second item: only the bullet, not "list" again.
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"bullet birds"
	)
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"out of list after",
	)
def test_pr11606():
	"""
	A list item containing two links separated by a single space, inside a
	contenteditable: moving the caret over the boundary must announce leaving
	the first link and the space, then entering the second link.
	NOTE(review): several lines of this function were truncated in the source
	("Speech = ...", "trings_match(", "rrow\")"); the missing
	"actualSpeech = _chrome.getSpeechAfterKey(...)" /
	"_asserts.strings_match(" fragments below are reconstructed — verify the
	key names and expected strings against the upstream NVDA chromeTests.py.
	"""
	_chrome.prepareChrome(
		r"""
		<div contenteditable="true">
			<ul>
				<li><a href="#">A</a> <a href="#">B</a></li>
				<li>C D</li>
			</ul>
		</div>
		"""
	)
	# Force focus mode so the caret keys navigate the contenteditable.
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"section multi line editable list bullet link A link B"
	)
	# Crossing the end of link A: leaving the link and the space are announced.
	actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			"out of link",
			"space"
		])
	)
	# Moving onto link B announces entering a link.
	actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"link"
	)
	# Re-reading the line reports the full item content.
	actualSpeech = _chrome.getSpeechAfterKey("upArrow")
	_asserts.strings_match(
		actualSpeech,
		"bullet link A link B"
	)
def test_ariaTreeGrid_browseMode():
	"""
	Browse mode navigation through the W3C ARIA practices treegrid example:
	quick-nav to the table, move by line and by table commands, then press
	enter to activate focus mode on a row.
	"""
	testFile = os.path.join(ARIAExamplesDir, "treegrid", "treegrid-1.html")
	_chrome.prepareChrome(
		f"""
		<iframe src="{testFile}"></iframe>
		"""
	)
	actualSpeech = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(
		actualSpeech,
		"frame main landmark Treegrid Email Inbox Example heading level 1"
	)
	# Tab to the "issue 790." link first; this gives a known caret position for
	# when entering focus mode on the treegrid later.
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"issue 790. link"
	)
	# Jump to the ARIA treegrid with the next table quicknav command.
	# The browse mode caret will be inside the table on the caption before the first row.
	actualSpeech = _chrome.getSpeechAfterKey("t")
	_asserts.strings_match(
		actualSpeech,
		"Inbox table clickable with 5 rows and 3 columns Inbox"
	)
	# Move past the caption onto row 1 with downArrow
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"row 1 column 1 Subject"
	)
	# Navigate to row 2 column 1 with NVDA table navigation command
	actualSpeech = _chrome.getSpeechAfterKey("control+alt+downArrow")
	_asserts.strings_match(
		actualSpeech,
		"expanded level 1 row 2 Treegrids are awesome"
	)
	# Press enter to activate NVDA focus mode and focus the current row
	actualSpeech = _chrome.getSpeechAfterKey("enter")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			# focus mode turns on
			"Focus mode",
			# Focus enters the ARIA treegrid (table)
			"Inbox table",
			# Focus lands on row 2
			"level 1 Treegrids are awesome Want to learn how to use them? aaron at thegoogle dot rocks expanded",
		])
	)
def ARIAInvalid_spellingAndGrammar():
	"""
	Tests ARIA invalid values of "spelling", "grammar" and "spelling, grammar".
	Although IAccessible2 allows multiple values for invalid, multiple values
	for aria-invalid are not yet standard; even if they were, they would be
	space-separated, so the html here would need to change but the expected
	output should not.
	"""
	_chrome.prepareChrome(
		r"""
		<p>Big <span aria-invalid="spelling">caat</span> meos</p>
		<p>Small <span aria-invalid="grammar">a dog</span> woofs</p>
		<p>Fat <span aria-invalid="grammar, spelling">a ffrog</span> crokes</p>
		"""
	)
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"Big spelling error caat meos"
	)
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"Small grammar error a dog woofs"
	)
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"Fat spelling error grammar error a ffrog crokes"
	)
def test_ariaCheckbox_browseMode():
	"""
	Navigate to an unchecked checkbox in reading mode.
	"""
	testFile = os.path.join(ARIAExamplesDir, "checkbox", "checkbox-1", "checkbox-1.html")
	_chrome.prepareChrome(
		f"""
		<iframe src="{testFile}"></iframe>
		"""
	)
	# Jump to the first heading in the iframe.
	actualSpeech = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(
		actualSpeech,
		"frame main landmark Checkbox Example (Two State) heading level 1"
	)
	# Navigate to the checkbox.
	actualSpeech = _chrome.getSpeechAfterKey("x")
	_asserts.strings_match(
		actualSpeech,
		"Sandwich Condiments grouping list with 4 items Lettuce check box not checked"
	)
def test_i12147():
	"""Regression test for NVDA issue #12147.

	When the focused element removes itself from the DOM inside its click
	handler and focus is programmatically moved to another element, the newly
	focused element should be announced.
	"""
	# Plain (non-f) string: the original used an f-string with no
	# placeholders, which forced the script's braces to be escaped as {{ }}.
	_chrome.prepareChrome(
		"""
		<div>
			<button id='trigger0'>trigger 0</button>
			<h4 id='target0' tabindex='-1'>target 0</h4>
		</div>
		<script>
			let trigger0 = document.querySelector('#trigger0');
			trigger0.addEventListener('click', e => {
				let focusTarget = document.querySelector('#target0');
				trigger0.remove();
				focusTarget.focus();
			})
		</script>
		"""
	)
	# Jump to the first button (the trigger)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"trigger 0 button"
	)
	# Activate the button, we should hear the new focus target.
	actualSpeech = _chrome.getSpeechAfterKey("enter")
	_asserts.strings_match(
		actualSpeech,
		"target 0 heading level 4"
	)
def test_tableInStyleDisplayTable():
	"""A <table> nested inside an element styled with "display:table" should
	still be treated as one data table: correct row/column counts and working
	table navigation.
	"""
	_chrome.prepareChrome(
		"""
		<p>Paragraph</p>
		<div style="display:table">
			<table>
				<thead>
					<tr>
						<th>First heading</th>
						<th>Second heading</th>
					</tr>
				</thead>
				<tbody>
					<tr>
						<td>First content cell</td>
						<td>Second content cell</td>
					</tr>
				</tbody>
			</table>
		</div>
		"""
	)
	# Jump to the table
	actualSpeech = _chrome.getSpeechAfterKey("t")
	_asserts.strings_match(
		actualSpeech,
		"table with 2 rows and 2 columns row 1 column 1 First heading"
	)
	# Table navigation: move down one row from the header row.
	nextActualSpeech = _chrome.getSpeechAfterKey("control+alt+downArrow")
	_asserts.strings_match(
		nextActualSpeech,
		"row 2 First content cell"
	)
def test_ariaRoleDescription_focus():
	"""aria-roledescription replaces the reported role when focusing buttons,
	both in browse mode and after switching to focus mode.
	"""
	_chrome.prepareChrome(
		"""
		<button aria-roledescription="pizza">Cheese</button><br />
		<button aria-roledescription="pizza">Meat</button>
		"""
	)
	# Tab to the first button, force focus mode, then tab to the second
	# button; each keypress has a fixed expected utterance.
	keyExpectations = (
		("tab", "Cheese pizza"),
		("NVDA+space", "Focus mode"),  # force focus mode
		("tab", "Meat pizza"),
	)
	for key, expected in keyExpectations:
		spoken = _chrome.getSpeechAfterKey(key)
		_asserts.strings_match(
			spoken,
			expected
		)
def test_ariaRoleDescription_inline_browseMode():
	"""aria-roledescription on an inline element (an image) in browse mode:
	the custom role is reported on entry when reading by line, and on both
	entry and exit when reading by word.
	"""
	_chrome.prepareChrome(
		"""
		<p>Start
		<img aria-roledescription="drawing" alt="Our logo" src="https://www.nvaccess.org/images/logo.png" />
		End</p>
		"""
	)
	# When reading the entire line,
	# entering the custom role should be reported,
	# but not exiting
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"Start drawing Our logo End"
	)
	# When reading the line by word,
	# Both entering and exiting the custom role should be reported.
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"drawing Our"
	)
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"logo out of drawing"
	)
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"End"
	)
def test_ariaRoleDescription_block_browseMode():
	"""aria-roledescription on a block element in browse mode: both entering
	and exiting the custom role are reported when moving by line.
	"""
	_chrome.prepareChrome(
		"""
		<aside aria-roledescription="warning">
			<p>Wet paint!</p>
			<p>Please be careful.</p>
		</aside>
		<p>End</p>
		"""
	)
	# Read the page line by line; entry ("warning ...") and exit
	# ("out of warning ...") of the custom role must both be announced.
	expectedLines = (
		"warning Wet paint!",
		"Please be careful.",
		"out of warning End",
	)
	for expectedLine in expectedLines:
		spokenLine = _chrome.getSpeechAfterKey("downArrow")
		_asserts.strings_match(
			spokenLine,
			expectedLine
		)
def test_ariaRoleDescription_inline_contentEditable():
	"""aria-roledescription on an inline element inside a contenteditable,
	read with the caret in focus mode: reported on entry by line, and on
	both entry and exit by word.
	"""
	_chrome.prepareChrome(
		"""
		<div contenteditable="true">
		<p>Top line</p>
		<p>Start
		<img aria-roledescription="drawing" alt="Our logo" src="https://www.nvaccess.org/images/logo.png" />
		End</p>
		</div>
		"""
	)
	# Force focus mode
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"section multi line editable Top line"
	)
	# When reading the entire line,
	# entering the custom role should be reported,
	# but not exiting
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"Start drawing Our logo End"
	)
	# When reading the line by word,
	# Both entering and exiting the custom role should be reported.
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"drawing Our logo out of drawing"
	)
	actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
	_asserts.strings_match(
		actualSpeech,
		"End"
	)
def test_ariaRoleDescription_block_contentEditable():
	"""aria-roledescription on a block element inside a contenteditable, read
	with the caret in focus mode: both entering and exiting the custom role
	are reported when moving by line.
	"""
	_chrome.prepareChrome(
		"""
		<div contenteditable="true">
		<p>Top line</p>
		<aside aria-roledescription="warning">
			<p>Wet paint!</p>
			<p>Please be careful.</p>
		</aside>
		<p>End</p>
		</div>
		"""
	)
	# Force focus mode
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"section multi line editable Top line"
	)
	# when reading the page by line,
	# both entering and exiting the custom role should be reported.
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"warning Wet paint!"
	)
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"Please be careful."
	)
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"out of warning End"
	)
def _getAriaDescriptionSample() -> str:
	"""Build the shared HTML fixture for the aria-description tests.

	The fixture is a multi-line textbox containing: an annotated span
	(aria-description), a link with an aria-description, and a link with only
	a title attribute (whose title must NOT be reported in browse/say-all).
	"""
	nearbyEditorNote = "User nearby, Aaron"
	newTabNote = "opens in a new tab"
	# link title should be read in focus
	searchTitle = "conduct a search"
	googleLinkText = "to google's"
	return f"""
	<div>
		<div
			contenteditable=""
			spellcheck="false"
			role="textbox"
			aria-multiline="true"
		><p>This is a line with no annotation</p>
		<p><span
			aria-description="{nearbyEditorNote}"
		>Here is a sentence that is being edited by someone else.</span>
		<b>Multiple can edit this.</b></p>
		<p>An element with a role, follow <a
			href="www.google.com"
			aria-description="{newTabNote}"
		>{googleLinkText}</a
		> website</p>
		<p>Testing the title attribute, <a
			href="www.google.com"
			title="{searchTitle}"
		>{googleLinkText}</a
		> website</p>
	</div>
	</div>
	"""
def test_ariaDescription_focusMode():
	"""aria-description reporting when moving the caret by line in focus mode:
	annotations and link descriptions are spoken, but a link's title
	attribute is not.
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"edit multi line This is a line with no annotation\nFocus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	# reporting aria-description only supported in Chrome canary 92.0.4479.0+
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"User nearby, Aaron",  # annotation
			"Here is a sentence that is being edited by someone else.",  # span text
			"Multiple can edit this.",  # bold paragraph text
		])
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	# description-from hasn't reached Chrome stable yet.
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"An element with a role, follow",
			"link",
			"opens in a new tab",
			"to google's",  # link contents (name)
			"website"  # paragraph text
		])
	)
	# 'title' attribute for link ("conduct a search") should not be announced.
	# too often title is used without screen reader users in mind, and is overly verbose.
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Testing the title attribute,",  # paragraph text
			"link",  # link role
			"to google's",
			"website"
		])
	)
def test_ariaDescription_browseMode():
	"""aria-description reporting when reading by line in browse mode:
	annotations and link descriptions are spoken, but a link's title
	attribute is not.
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"edit multi line This is a line with no annotation"
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	# reporting aria-description only supported in Chrome canary 92.0.4479.0+
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"User nearby, Aaron",  # annotation
			"Here is a sentence that is being edited by someone else.",  # span text
			"Multiple can edit this.",  # bold paragraph text
		])
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	# description-from hasn't reached Chrome stable yet.
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"An element with a role, follow",
			"link",
			"opens in a new tab",
			"to google's",  # link contents (name)
			"website"  # paragraph text
		])
	)
	# 'title' attribute for link ("conduct a search") should not be announced.
	# too often title is used without screen reader users in mind, and is overly verbose.
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Testing the title attribute,",  # paragraph text
			"link",  # link role
			"to google's",
			"website"
		])
	)
def test_ariaDescription_sayAll():
	"""aria-description reporting during say-all (NVDA+downArrow): annotations
	and link descriptions are included; a link's title attribute is not.
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+downArrow")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			"Test page load complete",
			"edit multi line This is a line with no annotation",
			SPEECH_SEP.join([
				"User nearby, Aaron",
				"Here is a sentence that is being edited by someone else.",
				"Multiple can edit this.",
			]),
			SPEECH_SEP.join([
				"An element with a role, follow",
				"link",
				"opens in a new tab",
				"to google's",  # link contents (name)
				"website",  # paragraph text
			]),
			# 'title' attribute for link ("conduct a search") should not be announced.
			# too often title is used without screen reader users in mind, and is overly verbose.
			SPEECH_SEP.join([
				"Testing the title attribute,",  # paragraph text
				"link",  # link role
				# note description missing when sourced from title attribute
				"to google's",
				"website",
				"out of edit"
			]),
			"After Test Case Marker"
		])
	)
def test_i10840():
	"""Regression test for NVDA issue #10840.

	Row counts for a table containing thead/tbody/tfoot sections must include
	all sections, and in-table navigation must work across them.
	"""
	# Plain string: the original used an f-string with nothing interpolated.
	_chrome.prepareChrome(
		"""
		<table>
			<thead>
				<tr>
					<th>Month</th>
					<th>items</th>
				</tr>
			</thead>
			<tbody>
				<tr>
					<td>January</td>
					<td>100</td>
				</tr>
				<tr>
					<td>February</td>
					<td>80</td>
				</tr>
			</tbody>
			<tfoot>
				<tr>
					<td>Sum</td>
					<td>180</td>
				</tr>
			</tfoot>
		</table>
		"""
	)
	# Quick nav to the table: 4 rows = thead(1) + tbody(2) + tfoot(1).
	actualSpeech = _chrome.getSpeechAfterKey("t")
	_asserts.strings_match(
		actualSpeech,
		"table with 4 rows and 2 columns row 1 column 1 Month"
	)
	# Table navigation to the next column.
	nextActualSpeech = _chrome.getSpeechAfterKey("control+alt+rightArrow")
	_asserts.strings_match(
		nextActualSpeech,
		"column 2 items"
	)
def test_mark_browse():
	"""The <mark> element is reported as "highlighted" when reading by line
	and when moving the review cursor by word in browse mode.
	"""
	_chrome.prepareChrome(
		"""
		<div>
			<p>The word <mark>Kangaroo</mark> is important.</p>
		</div>
		"""
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(
		actualSpeech,
		"The word highlighted Kangaroo out of highlighted is important."
	)
	# numpad6: review cursor to the next word (desktop keyboard layout).
	actualSpeech = _chrome.getSpeechAfterKey("numpad6")
	_asserts.strings_match(
		actualSpeech,
		"word"
	)
	actualSpeech = _chrome.getSpeechAfterKey("numpad6")
	_asserts.strings_match(
		actualSpeech,
		"highlighted Kangaroo out of highlighted"
	)
def test_mark_focus():
	"""Focusing a link wrapped in <mark> in focus mode should report the
	"highlighted" state alongside the link.
	"""
	_chrome.prepareChrome(
		"""
		<div>
			<p>The word <mark><a href="#">Kangaroo</a></mark> is important.</p>
		</div>
		"""
	)
	# Switch to focus mode, then tab onto the marked link.
	keyExpectations = (
		("NVDA+space", "Focus mode"),
		('tab', "highlighted\nKangaroo link"),
	)
	for key, expected in keyExpectations:
		spoken = _chrome.getSpeechAfterKey(key)
		_asserts.strings_match(
			spoken,
			expected
		)
def test_preventDuplicateSpeechFromDescription_browse_tab():
	"""When a link's description (title / aria-label) duplicates its name,
	tabbing in browse mode must not speak the text twice, even with
	"report object descriptions" enabled.
	"""
	spy = _NvdaLib.getSpyLib()
	REPORT_OBJ_DESC_KEY = ["presentation", "reportObjectDescriptions"]
	spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
	_chrome.prepareChrome(
		"""
		<a href="#" title="apple" style="display:block">apple</a>
		<a href="#" title="banana" aria-label="banana" style="display:block">contents</a>
		"""
	)
	actualSpeech = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(
		actualSpeech,
		"apple link"
	)
	actualSpeech = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(
		actualSpeech,
		"banana link"
	)
# NOTE(review): this name lacks the "test_" prefix, so it will not be
# collected automatically — confirm whether it is invoked explicitly from the
# robot suite or was left disabled on purpose.
def preventDuplicateSpeechFromDescription_focus():
	"""When a link's description (title / aria-label) duplicates its name,
	tabbing in focus mode must not speak the text twice, even with
	"report object descriptions" enabled.
	"""
	spy = _NvdaLib.getSpyLib()
	REPORT_OBJ_DESC_KEY = ["presentation", "reportObjectDescriptions"]
	spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
	_chrome.prepareChrome(
		"""
		<a href="#" title="apple" style="display:block">apple</a>
		<a href="#" title="banana" aria-label="banana" style="display:block">contents</a>
		"""
	)
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(
		actualSpeech,
		"apple link"
	)
	actualSpeech = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(
		actualSpeech,
		"banana link"
	)
def test_ensureNoBrowseModeDescription():
	"""A link description sourced from the title attribute should never be
	reported in browse mode (speech or braille), regardless of the
	"report object descriptions" setting; in focus mode it is reported only
	when that setting is enabled.
	"""
	REPORT_OBJ_DESC_KEY = ["presentation", "reportObjectDescriptions"]
	spy = _NvdaLib.getSpyLib()
	spy.set_configValue(["braille", "messageTimeout"], 0)
	_chrome.prepareChrome(
		"\n".join([
			# Fix: a comma was missing after the button literal, so implicit
			# string concatenation fused the button and the first link into a
			# single join element instead of separate lines.
			r'<button>something for focus</button>',
			r'<a href="#" style="display:block" title="Cat">Apple</a>',
			# second link to make testing second focus mode tab easier
			r'<a href="#" style="display:block" title="Fish">Banana</a>',
		])
	)
	actualSpeech = _NvdaLib.getSpeechAfterKey('tab')
	_builtIn.should_contain(actualSpeech, "something for focus")
	# Test Browse mode
	spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
	actualSpeech, actualBraille = _NvdaLib.getSpeechAndBrailleAfterKey('downArrow')
	_asserts.speech_matches(
		actualSpeech,
		SPEECH_SEP.join([
			"link",  # role description
			# No link description (from title)
			"Apple",  # link name / contents
		]),
		message="Test browse mode with reportObjectDescriptions=True"
	)
	_asserts.braille_matches(
		actualBraille,
		BRAILLE_SEP.join([
			"lnk",  # role description
			# No link description (from title)
			"Apple",  # link name / contents
		]),
		message="Test browse mode with reportObjectDescriptions=True"
	)
	# move virtual cursor back up to reset to start position
	actualSpeech = _NvdaLib.getSpeechAfterKey('upArrow')
	_builtIn.should_contain(actualSpeech, "something for focus")
	spy.set_configValue(REPORT_OBJ_DESC_KEY, False)
	actualSpeech, actualBraille = _NvdaLib.getSpeechAndBrailleAfterKey('downArrow')
	_asserts.speech_matches(
		actualSpeech,
		SPEECH_SEP.join([
			"link",  # role description
			# No link description (from title)
			"Apple",  # link name / contents
		]),
		message="Test browse mode with reportObjectDescriptions=False"
	)
	_asserts.braille_matches(
		actualBraille,
		BRAILLE_SEP.join([
			"lnk",  # role description
			# No link description (from title)
			"Apple",  # link name / contents
		]),
		message="Test browse mode with reportObjectDescriptions=False"
	)
	# move virtual cursor back up to reset to start position
	actualSpeech = _NvdaLib.getSpeechAfterKey('upArrow')
	_builtIn.should_contain(actualSpeech, "something for focus")
	spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
	# Test focus mode
	actualSpeech = _NvdaLib.getSpeechAfterKey("nvda+space")
	_asserts.speech_matches(actualSpeech, "Focus mode")
	actualSpeech, actualBraille = _NvdaLib.getSpeechAndBrailleAfterKey("tab")
	_asserts.speech_matches(
		actualSpeech,
		SPEECH_SEP.join([
			"Apple",  # link name / contents
			"link",  # role description
			"Cat",  # link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=True"
	)
	_asserts.braille_matches(
		actualBraille,
		BRAILLE_SEP.join([
			"Apple",  # link name / contents
			"lnk",  # role description
			"Cat",  # link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=True"
	)
	# Use second link to test focus mode when 'reportObjectDescriptions' is off.
	spy.set_configValue(REPORT_OBJ_DESC_KEY, False)
	actualSpeech, actualBraille = _NvdaLib.getSpeechAndBrailleAfterKey("tab")
	_asserts.speech_matches(
		actualSpeech,
		SPEECH_SEP.join([
			"Banana",  # link name / contents
			"link",  # role description
			# No link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=False"
	)
	_asserts.braille_matches(
		actualBraille,
		BRAILLE_SEP.join([
			"Banana",  # link name / contents
			"lnk",  # role description
			# No link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=False"
	)
def test_quickNavTargetReporting():
	"""When quick-navigating to a heading, the target's content is read first;
	ancestor article context (role + description) is appended only when
	"report articles" is enabled.
	"""
	spy = _NvdaLib.getSpyLib()
	REPORT_ARTICLES = ["documentFormatting", "reportArticles"]
	spy.set_configValue(REPORT_ARTICLES, False)
	_chrome.prepareChrome(
		"""
		<div
			aria-describedby="descId"
			aria-labelledby="labelId"
			role="article"
		>
			<h1>Quick Nav Target</h1>
			<div id="labelId">
				<div>Some name.</div>
			</div>
			<div id="descId">
				<span>A bunch of text.</span>
			</div>
		</div>
		"""
	)
	# Quick nav to heading
	actualSpeech = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Quick Nav Target",  # Heading content (quick nav target), should read first
			"heading",  # Heading role
			"level 1",  # Heading level
		])
	)
	# Reset to allow trying again with report articles enabled
	actualSpeech = _chrome.getSpeechAfterKey("control+home")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Before Test Case Marker",
		])
	)
	# Quick nav to heading with report articles enabled
	spy.set_configValue(REPORT_ARTICLES, True)
	actualSpeech = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Quick Nav Target",  # Heading content (quick nav target), should read first
			"heading",  # Heading role
			"level 1",  # Heading level
			"article",  # article role, enabled via report article
			"A bunch of text.",  # article (ancestor) description
		])
	)
def test_focusTargetReporting():
	"""When focusing a link inside an article, the link is read first in
	browse mode (with article context appended only if "report articles" is
	enabled), while in focus mode the article container is announced before
	the link regardless of that setting.
	"""
	spy = _NvdaLib.getSpyLib()
	REPORT_ARTICLES = ["documentFormatting", "reportArticles"]
	spy.set_configValue(REPORT_ARTICLES, False)
	_chrome.prepareChrome(
		"""
		<a href="#">before Target</a>
		<div
			aria-describedby="descId"
			aria-labelledby="labelId"
			role="article"
		>
			<a href="#">Focus Target</a>
			<div id="labelId">
				<div>Some name.</div>
			</div>
			<div id="descId">
				<span>A bunch of text.</span>
			</div>
		</div>
		"""
	)
	# Set focus
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"before Target",
			"link",
		])
	)
	# Focus the link
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Focus Target",  # link content (focus target), should read first
			"link",  # link role
		]),
		message="browse mode - focus with Report Articles disabled"
	)
	# Reset to allow trying again with report articles enabled
	actualSpeech = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"before Target",
			"link",
		])
	)
	# Focus the link with report articles enabled
	spy.set_configValue(REPORT_ARTICLES, True)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Focus Target",  # link content (focus target), should read first
			"link",  # link role
			"article",  # article role, enabled via report article
			"A bunch of text.",  # article (ancestor) description
		]),
		message="browse mode - focus with Report Articles enabled"
	)
	# Reset to allow trying again in focus mode
	actualSpeech = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"before Target",
			"link",
		])
	)
	# Force focus mode
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	spy.set_configValue(REPORT_ARTICLES, False)
	# Focus the link
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			SPEECH_SEP.join([
				"Some name.",  # name for article
				"article",  # article role, enabled via report article
				"A bunch of text.",  # description for article
			]),
			SPEECH_SEP.join([
				"Focus Target",  # link content (focus target), should read first
				"link",  # link role
			]),
		]),
		message="focus mode - focus with Report Articles disabled"
	)
	# Reset to allow trying again with report articles enabled
	actualSpeech = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"before Target",
			"link",
		])
	)
	# Focus the link with report articles enabled
	spy.set_configValue(REPORT_ARTICLES, True)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			SPEECH_SEP.join([
				"Some name.",  # name for article
				"article",  # article role, enabled via report article
				"A bunch of text.",  # description for article
			]),
			SPEECH_SEP.join([
				"Focus Target",  # link content (focus target), should read first
				"link",  # link role
			]),
		]),
		message="focus mode - focus with Report Articles enabled"
	)
| true | true |
f724de73bfb07fa9766f490a464f1f8eb216b233 | 738 | py | Python | tags/migrations/0002_auto_20160704_1112.py | making3/summonerqa | 7ab8472b2d24236ba1e6919fa0f00881f4a3e633 | [
"MIT"
] | null | null | null | tags/migrations/0002_auto_20160704_1112.py | making3/summonerqa | 7ab8472b2d24236ba1e6919fa0f00881f4a3e633 | [
"MIT"
] | null | null | null | tags/migrations/0002_auto_20160704_1112.py | making3/summonerqa | 7ab8472b2d24236ba1e6919fa0f00881f4a3e633 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-04 16:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the ``Category`` foreign key and the ``regex`` field to the Tag model."""
    dependencies = [
        ('tags', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='tag',
            # NOTE(review): field name is capitalised ('Category'), unlike the
            # usual lowercase convention; renaming would need a new migration.
            name='Category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tags.Category'),
        ),
        migrations.AddField(
            model_name='tag',
            name='regex',
            # default=None with preserve_default=False: None was only used to
            # populate existing rows during this migration.
            field=models.CharField(default=None, max_length=100),
            preserve_default=False,
        ),
    ]
| 26.357143 | 124 | 0.611111 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the ``Category`` foreign key and the ``regex`` field to the Tag model."""
    dependencies = [
        ('tags', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='tag',
            # NOTE(review): field name is capitalised ('Category'), unlike the
            # usual lowercase convention; renaming would need a new migration.
            name='Category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tags.Category'),
        ),
        migrations.AddField(
            model_name='tag',
            name='regex',
            # default=None with preserve_default=False: None was only used to
            # populate existing rows during this migration.
            field=models.CharField(default=None, max_length=100),
            preserve_default=False,
        ),
    ]
| true | true |
f724ded074f8fa3a1a1d5041388c8593fb112856 | 924 | py | Python | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | [
"MIT"
] | 2 | 2021-07-05T12:00:39.000Z | 2021-07-05T12:00:49.000Z | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | [
"MIT"
] | null | null | null | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from discord.ext.commands import cog
import discord_slash
from discord_slash import cog_ext
class Slashes(commands.Cog):
    """Cog registering the guild-scoped slash commands /ping and /say."""

    def __init__(self, client) -> None:
        self.client: commands.Bot = client

    @commands.Cog.listener()
    async def on_ready(self):
        print(f"[ {self.__class__.__name__} Cog Loaded ]")

    @cog_ext.cog_slash(name = "ping", guild_ids=[853316413649190912], description="Bot's latency")
    async def ping(self, ctx):
        # Fix: client.latency is in seconds; without the *1000 conversion the
        # rounded value was almost always reported as "0ms".
        await ctx.send("Pong! {}".format(str(round(self.client.latency * 1000))+"ms"))

    @cog_ext.cog_slash(name="say", description="say something with the bot", guild_ids=[853316413649190912])
    async def say(self, ctx: discord_slash.SlashContext, *, text: str):
        # Fix: ``self`` was missing, so the cog instance was bound to ``ctx``
        # and the command crashed on invocation.
        # Refuse anything containing '@' to prevent mention abuse.
        if '@' in text:
            await ctx.send("no")
            return
        await ctx.send(text)
def setup(client):
client.add_cog(Slashes(client)) | 35.538462 | 108 | 0.676407 | import discord
from discord.ext import commands
from discord.ext.commands import cog
import discord_slash
from discord_slash import cog_ext
class Slashes(commands.Cog):
    """Cog registering the guild-scoped slash commands /ping and /say."""

    def __init__(self, client) -> None:
        self.client: commands.Bot = client

    @commands.Cog.listener()
    async def on_ready(self):
        print(f"[ {self.__class__.__name__} Cog Loaded ]")

    @cog_ext.cog_slash(name = "ping", guild_ids=[853316413649190912], description="Bot's latency")
    async def ping(self, ctx):
        # Fix: client.latency is in seconds; without the *1000 conversion the
        # rounded value was almost always reported as "0ms".
        await ctx.send("Pong! {}".format(str(round(self.client.latency * 1000))+"ms"))

    @cog_ext.cog_slash(name="say", description="say something with the bot", guild_ids=[853316413649190912])
    async def say(self, ctx: discord_slash.SlashContext, *, text: str):
        # Fix: ``self`` was missing, so the cog instance was bound to ``ctx``
        # and the command crashed on invocation.
        # Refuse anything containing '@' to prevent mention abuse.
        if '@' in text:
            await ctx.send("no")
            return
        await ctx.send(text)
def setup(client):
client.add_cog(Slashes(client)) | true | true |
f724dee757778c7059a8bbb1f08fe86a9affccc9 | 652 | py | Python | manage_index.py | jdoiro3/myPersonalSite | a61245cfdc497a864c58fd0d9eee27a35f0b52f3 | [
"MIT"
] | 2 | 2021-10-05T03:03:34.000Z | 2022-03-15T12:38:07.000Z | manage_index.py | jdoiro3/myPersonalSite | a61245cfdc497a864c58fd0d9eee27a35f0b52f3 | [
"MIT"
] | null | null | null | manage_index.py | jdoiro3/myPersonalSite | a61245cfdc497a864c58fd0d9eee27a35f0b52f3 | [
"MIT"
] | null | null | null | from modules import index
import argparse
# Sub-commands accepted on the command line.
commands = ["cleanup", "re-index"]
parser = argparse.ArgumentParser(description='Manager for the Inverted Index.')
parser.add_argument('command', choices=commands, help='Command to perform on index.')
parser.add_argument('--in_s3', action='store_true', help='If passed, the index will be loaded from the S3 bucket')
# nargs='?' with const: passing "--file_path" with no value also yields 'index.json'.
parser.add_argument('--file_path', nargs='?', const='index.json', help='The file path for the index.')
args = parser.parse_args()
# The "or" fallback covers the case where --file_path was omitted entirely (None).
inv_index = index.InvertedIndex(from_file=True, in_s3=args.in_s3, file_path=args.file_path or 'index.json')
# NOTE(review): "re-index" is accepted by the parser but no handler for it is
# visible here — confirm it is implemented elsewhere.
if args.command == "cleanup":
inv_index.cleanup() | 46.571429 | 114 | 0.753067 | from modules import index
import argparse
# Sub-commands accepted on the command line.
commands = ["cleanup", "re-index"]
parser = argparse.ArgumentParser(description='Manager for the Inverted Index.')
parser.add_argument('command', choices=commands, help='Command to perform on index.')
parser.add_argument('--in_s3', action='store_true', help='If passed, the index will be loaded from the S3 bucket')
# nargs='?' with const: passing "--file_path" with no value also yields 'index.json'.
parser.add_argument('--file_path', nargs='?', const='index.json', help='The file path for the index.')
args = parser.parse_args()
# The "or" fallback covers the case where --file_path was omitted entirely (None).
inv_index = index.InvertedIndex(from_file=True, in_s3=args.in_s3, file_path=args.file_path or 'index.json')
# NOTE(review): "re-index" is accepted by the parser but no handler for it is
# visible here — confirm it is implemented elsewhere.
if args.command == "cleanup":
inv_index.cleanup() | true | true |
f724df091556b7dbed963d14802c99783e73424c | 4,049 | py | Python | pajbot/modules/top.py | Troy-Bot/pajbot | 11b7e86ca270d57d4f35226effc3eb16250e2dfc | [
"MIT"
] | null | null | null | pajbot/modules/top.py | Troy-Bot/pajbot | 11b7e86ca270d57d4f35226effc3eb16250e2dfc | [
"MIT"
] | null | null | null | pajbot/modules/top.py | Troy-Bot/pajbot | 11b7e86ca270d57d4f35226effc3eb16250e2dfc | [
"MIT"
] | null | null | null | import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.utils import time_since
log = logging.getLogger(__name__)
class TopModule(BaseModule):
    """Chat commands that report the top N users by various statistics.

    Each command ranks non-ignored users from the ``User`` table and
    announces the top entries in chat. The four ``top_*`` commands shared
    near-identical query/format logic, now factored into ``_top_users``.
    """

    ID = __name__.split(".")[-1]
    NAME = "Top commands"
    DESCRIPTION = "Commands that show the top X users of something"
    CATEGORY = "Feature"
    SETTINGS = [
        ModuleSetting(
            key="num_top",
            label="How many people we should list",
            type="number",
            required=True,
            placeholder="min 1, max 5",
            default=3,
            constraints={"min_value": 1, "max_value": 5},
        ),
        ModuleSetting(
            key="enable_topchatters",
            label="Enable the !topchatters command (most messages)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_topwatchers",
            label="Enable the !topwatchers command (most time spent watching the stream)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_topoffline",
            label="Enable the !topoffline command (most time spent in offline chat)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_toppoints",
            label="Enable the !toppoints command (most points)",
            type="boolean",
            required=True,
            default=False,
        ),
    ]

    def _top_users(self, order_column, format_user):
        """Return display strings for the top ``num_top`` non-ignored users.

        order_column: the ``User`` column to rank by (descending).
        format_user: callable turning a ``User`` row into its display string;
            invoked inside the session scope so row attributes are loaded.
        """
        with DBManager.create_session_scope() as db_session:
            users = (
                db_session.query(User)
                .filter_by(ignored=False)
                .order_by(order_column.desc())
                .limit(self.settings["num_top"])
            )
            return [format_user(user) for user in users]

    def top_chatters(self, bot, **rest):
        """Announce the users with the most chat messages."""
        data = self._top_users(User.num_lines, lambda user: f"{user} ({user.num_lines})")
        bot.say(f"Top {self.settings['num_top']} chatters: {', '.join(data)}")

    def top_watchers(self, bot, **rest):
        """Announce the users with the most time spent in online chat."""
        data = self._top_users(
            User.time_in_chat_online,
            lambda user: f"{user} ({time_since(user.time_in_chat_online.total_seconds(), 0, time_format='short')})",
        )
        bot.say(f"Top {self.settings['num_top']} watchers: {', '.join(data)}")

    def top_offline(self, bot, **rest):
        """Announce the users with the most time spent in offline chat."""
        data = self._top_users(
            User.time_in_chat_offline,
            lambda user: f"{user} ({time_since(user.time_in_chat_offline.total_seconds(), 0, time_format='short')})",
        )
        bot.say(f"Top {self.settings['num_top']} offline chatters: {', '.join(data)}")

    def top_points(self, bot, **rest):
        """Announce the users with the most points."""
        data = self._top_users(User.points, lambda user: f"{user} ({user.points})")
        bot.say(f"Top {self.settings['num_top']} banks: {', '.join(data)}")

    def load_commands(self, **options):
        """Register only the commands enabled in the module settings."""
        if self.settings["enable_topchatters"]:
            self.commands["topchatters"] = Command.raw_command(self.top_chatters)
        if self.settings["enable_topwatchers"]:
            self.commands["topwatchers"] = Command.raw_command(self.top_watchers)
        if self.settings["enable_topoffline"]:
            self.commands["topoffline"] = Command.raw_command(self.top_offline)
        if self.settings["enable_toppoints"]:
            self.commands["toppoints"] = Command.raw_command(self.top_points)
| 37.841121 | 138 | 0.605829 | import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.utils import time_since
log = logging.getLogger(__name__)
class TopModule(BaseModule):
    """Chat commands that report the top N users by various statistics.

    Each command ranks non-ignored users from the ``User`` table and
    announces the top entries in chat.
    """
    ID = __name__.split(".")[-1]
    NAME = "Top commands"
    DESCRIPTION = "Commands that show the top X users of something"
    CATEGORY = "Feature"
    SETTINGS = [
        ModuleSetting(
            key="num_top",
            label="How many people we should list",
            type="number",
            required=True,
            placeholder="min 1, max 5",
            default=3,
            constraints={"min_value": 1, "max_value": 5},
        ),
        ModuleSetting(
            key="enable_topchatters",
            label="Enable the !topchatters command (most messages)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_topwatchers",
            label="Enable the !topwatchers command (most time spent watching the stream)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_topoffline",
            label="Enable the !topoffline command (most time spent in offline chat)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_toppoints",
            label="Enable the !toppoints command (most points)",
            type="boolean",
            required=True,
            default=False,
        ),
    ]
    def top_chatters(self, bot, **rest):
        """Announce the users with the most chat messages."""
        data = []
        with DBManager.create_session_scope() as db_session:
            for user in db_session.query(User).filter_by(ignored=False).order_by(User.num_lines.desc()).limit(self.settings["num_top"]):
                data.append(f"{user} ({user.num_lines})")
            bot.say(f"Top {self.settings['num_top']} chatters: {', '.join(data)}")
    def top_watchers(self, bot, **rest):
        """Announce the users with the most time spent in online chat."""
        data = []
        with DBManager.create_session_scope() as db_session:
            for user in (
                db_session.query(User).filter_by(ignored=False).order_by(User.time_in_chat_online.desc()).limit(self.settings["num_top"])
            ):
                data.append(f"{user} ({time_since(user.time_in_chat_online.total_seconds(), 0, time_format='short')})")
            bot.say(f"Top {self.settings['num_top']} watchers: {', '.join(data)}")
    def top_offline(self, bot, **rest):
        """Announce the users with the most time spent in offline chat."""
        data = []
        with DBManager.create_session_scope() as db_session:
            for user in (
                db_session.query(User).filter_by(ignored=False).order_by(User.time_in_chat_offline.desc()).limit(self.settings["num_top"])
            ):
                data.append(f"{user} ({time_since(user.time_in_chat_offline.total_seconds(), 0, time_format='short')})")
            bot.say(f"Top {self.settings['num_top']} offline chatters: {', '.join(data)}")
    def top_points(self, bot, **rest):
        """Announce the users with the most points."""
        data = []
        with DBManager.create_session_scope() as db_session:
            for user in db_session.query(User).filter_by(ignored=False).order_by(User.points.desc()).limit(self.settings["num_top"]):
                data.append(f"{user} ({user.points})")
            bot.say(f"Top {self.settings['num_top']} banks: {', '.join(data)}")
    def load_commands(self, **options):
        """Register only the commands enabled in the module settings."""
        if self.settings["enable_topchatters"]:
            self.commands["topchatters"] = Command.raw_command(self.top_chatters)
        if self.settings["enable_topwatchers"]:
            self.commands["topwatchers"] = Command.raw_command(self.top_watchers)
        if self.settings["enable_topoffline"]:
            self.commands["topoffline"] = Command.raw_command(self.top_offline)
        if self.settings["enable_toppoints"]:
            self.commands["toppoints"] = Command.raw_command(self.top_points)
| true | true |
f724df327e8441ac179a887f4d64a5bd5eb292a3 | 3,207 | py | Python | jigs/hpcc/source/lysozyme_we.py | gitter-badger/wepy-1 | 9bc619aeae178ad5d10f658fae2abfd2c7aeb18a | [
"MIT"
] | 35 | 2017-08-22T15:39:06.000Z | 2022-03-20T15:17:52.000Z | jigs/hpcc/source/lysozyme_we.py | gitter-badger/wepy-1 | 9bc619aeae178ad5d10f658fae2abfd2c7aeb18a | [
"MIT"
] | 33 | 2017-10-02T22:04:45.000Z | 2022-03-02T22:19:08.000Z | jigs/hpcc/source/lysozyme_we.py | stxinsite/wepy | 352d4c1316b20e839aae8824eedd66f0f2d0b456 | [
"MIT"
] | 17 | 2018-07-14T15:33:30.000Z | 2022-01-18T16:30:55.000Z | from pympler.asizeof import asizeof
def get_size(obj):
    """Return the recursively-measured size of *obj* in megabytes (MB).

    Uses pympler's ``asizeof`` (imported at module level), which follows
    object references, and divides by 1e6 — decimal megabytes, not MiB.
    """
    return asizeof(obj) / 1000000
if __name__ == "__main__":
    # prom.start_http_server(9001)
    import os
    import shutil
    import sys
    import logging

    from pathlib import Path

    # from multiprocessing_logging import install_mp_handler

    from wepy_tools.monitoring.prometheus import SimMonitor
    from wepy_tools.sim_makers.openmm.lysozyme import LysozymeImplicitOpenMMSimMaker

    logging.getLogger().setLevel(logging.DEBUG)
    # install_mp_handler()

    # All arguments are positional (kept argparse-free for simple cluster
    # submission scripts); -h/--help prints usage and exits.
    if sys.argv[1] == "-h" or sys.argv[1] == "--help":
        print("arguments: n_cycles, n_steps, n_walkers, n_workers, platform, resampler, work_mapper, tag")
        exit()
    else:
        n_cycles = int(sys.argv[1])
        n_steps = int(sys.argv[2])
        n_walkers = int(sys.argv[3])
        n_workers = int(sys.argv[4])
        platform = sys.argv[5]
        resampler = sys.argv[6]
        work_mapper = sys.argv[7]
        tag = sys.argv[8]

    print("Number of steps: {}".format(n_steps))
    print("Number of cycles: {}".format(n_cycles))

    output_dir = Path('_output')
    result_dir = output_dir / 'we_lysozyme'

    # wipe any previous run, then (re)create the results directory
    try:
        shutil.rmtree(result_dir)
    except FileNotFoundError:
        pass
    os.makedirs(result_dir, exist_ok=True)

    sim_maker = LysozymeImplicitOpenMMSimMaker()

    apparatus = sim_maker.make_apparatus(
        integrator='LangevinIntegrator',
        resampler=resampler,
        bc='UnbindingBC',
        platform=platform,
    )

    # work mapper is resolved by spec string; one device id per worker
    work_mapper_spec = work_mapper
    work_mapper_class = None
    work_mapper_params = {
        'platform' : platform,
        'device_ids' : [str(i) for i in range(n_workers)],
    }

    # Prometheus-based monitoring of the simulation
    monitor_class = SimMonitor
    monitor_params = {
        'tag' : tag,
        'port' : 9001,
    }

    config = sim_maker.make_configuration(apparatus,
                                          work_mapper_class=work_mapper_class,
                                          work_mapper_spec=work_mapper_spec,
                                          work_mapper_params=work_mapper_params,
                                          platform=platform,
                                          work_dir=str(result_dir),
                                          monitor_class=monitor_class,
                                          monitor_params=monitor_params,
                                          )

    # BUG FIX: removed a leftover `breakpoint()` here — it dropped into pdb
    # and stalled every non-interactive (batch/cluster) run at this point.

    ## set up profiling and initial stats
    print("Orchestration objects")
    print("----------------------------------------")
    print(f"Sim maker size: {get_size(sim_maker)} Mb")
    print(f"Apparatus size: {get_size(apparatus)} Mb")
    print(f"Configuration size: {get_size(config)} Mb")
    print("----------------------------------------\n")

    sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config)

    print("Starting run")
    print("----------------------------------------")
    sim_manager.run_simulation(n_cycles, n_steps,
                               num_workers=n_workers)
    print("----------------------------------------")
    print("Finished run")
| 28.633929 | 106 | 0.565326 | from pympler.asizeof import asizeof
def get_size(obj):
return asizeof(obj) / 1000000
if __name__ == "__main__":
import os
import shutil
import sys
import logging
from pathlib import Path
from wepy_tools.monitoring.prometheus import SimMonitor
from wepy_tools.sim_makers.openmm.lysozyme import LysozymeImplicitOpenMMSimMaker
logging.getLogger().setLevel(logging.DEBUG)
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
print("arguments: n_cycles, n_steps, n_walkers, n_workers, platform, resampler, work_mapper, tag")
exit()
else:
n_cycles = int(sys.argv[1])
n_steps = int(sys.argv[2])
n_walkers = int(sys.argv[3])
n_workers = int(sys.argv[4])
platform = sys.argv[5]
resampler = sys.argv[6]
work_mapper = sys.argv[7]
tag = sys.argv[8]
print("Number of steps: {}".format(n_steps))
print("Number of cycles: {}".format(n_cycles))
output_dir = Path('_output')
result_dir = output_dir / 'we_lysozyme'
try:
shutil.rmtree(result_dir)
except FileNotFoundError:
pass
os.makedirs(result_dir, exist_ok=True)
sim_maker = LysozymeImplicitOpenMMSimMaker()
apparatus = sim_maker.make_apparatus(
integrator='LangevinIntegrator',
resampler=resampler,
bc='UnbindingBC',
platform=platform,
)
work_mapper_spec = work_mapper
work_mapper_class = None
work_mapper_params = {
'platform' : platform,
'device_ids' : [str(i) for i in range(n_workers)],
}
monitor_class = SimMonitor
monitor_params = {
'tag' : tag,
'port' : 9001,
}
config = sim_maker.make_configuration(apparatus,
work_mapper_class=work_mapper_class,
work_mapper_spec=work_mapper_spec,
work_mapper_params=work_mapper_params,
platform=platform,
work_dir=str(result_dir),
monitor_class=monitor_class,
monitor_params=monitor_params,
)
breakpoint()
print("----------------------------------------")
print(f"Sim maker size: {get_size(sim_maker)} Mb")
print(f"Apparatus size: {get_size(apparatus)} Mb")
print(f"Configuration size: {get_size(config)} Mb")
print("----------------------------------------\n")
sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config)
print("Starting run")
print("----------------------------------------")
sim_manager.run_simulation(n_cycles, n_steps,
num_workers=n_workers)
print("----------------------------------------")
print("Finished run")
| true | true |
f724e052a84d5bf01809f05e2ce2708627528d63 | 6,634 | py | Python | sendSMSSkillLambda/package/ask_sdk_model/session_ended_request.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | null | null | null | sendSMSSkillLambda/package/ask_sdk_model/session_ended_request.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | null | null | null | sendSMSSkillLambda/package/ask_sdk_model/session_ended_request.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | 1 | 2019-10-11T17:15:20.000Z | 2019-10-11T17:15:20.000Z | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.session_ended_error import SessionEndedError
from ask_sdk_model.session_ended_reason import SessionEndedReason
class SessionEndedRequest(Request):
    """Request sent to a skill when a currently open session is closed.

    Alexa delivers a SessionEndedRequest when: the user says "exit"; the user
    does not respond, or says something matching no intent, while the device
    is listening for a response; or an error occurs.

    :param request_id: Represents the unique identifier for the specific request.
    :type request_id: (optional) str
    :param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
    :type timestamp: (optional) datetime
    :param locale: A string indicating the user's locale. For example: en-US. This value is only provided with certain request types.
    :type locale: (optional) str
    :param reason: Describes why the session ended.
    :type reason: (optional) ask_sdk_model.session_ended_reason.SessionEndedReason
    :param error: An error object providing more information about the error that occurred.
    :type error: (optional) ask_sdk_model.session_ended_error.SessionEndedError
    """
    # attribute name -> deserialization type descriptor
    deserialized_types = {
        'object_type': 'str',
        'request_id': 'str',
        'timestamp': 'datetime',
        'locale': 'str',
        'reason': 'ask_sdk_model.session_ended_reason.SessionEndedReason',
        'error': 'ask_sdk_model.session_ended_error.SessionEndedError'
    }  # type: Dict

    # attribute name -> wire (JSON) key
    attribute_map = {
        'object_type': 'type',
        'request_id': 'requestId',
        'timestamp': 'timestamp',
        'locale': 'locale',
        'reason': 'reason',
        'error': 'error'
    }  # type: Dict

    def __init__(self, request_id=None, timestamp=None, locale=None, reason=None, error=None):
        # type: (Optional[str], Optional[datetime], Optional[str], Optional[SessionEndedReason], Optional[SessionEndedError]) -> None
        """Initialize the common Request fields plus the end reason and error."""
        self.__discriminator_value = "SessionEndedRequest"  # type: str

        self.object_type = self.__discriminator_value
        super(SessionEndedRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
        self.reason = reason
        self.error = error

    @staticmethod
    def _serialize(value):
        # type: (object) -> object
        """Serialize one contained value: model object, Enum member, or plain value."""
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, Enum):
            return value.value
        return value

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}  # type: Dict

        for attr in six.iterkeys(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [self._serialize(item) for item in value]
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: self._serialize(val) for key, val in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if isinstance(other, SessionEndedRequest):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| 47.385714 | 527 | 0.668074 |
import pprint
import re
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.session_ended_error import SessionEndedError
from ask_sdk_model.session_ended_reason import SessionEndedReason
class SessionEndedRequest(Request):
deserialized_types = {
'object_type': 'str',
'request_id': 'str',
'timestamp': 'datetime',
'locale': 'str',
'reason': 'ask_sdk_model.session_ended_reason.SessionEndedReason',
'error': 'ask_sdk_model.session_ended_error.SessionEndedError'
}
attribute_map = {
'object_type': 'type',
'request_id': 'requestId',
'timestamp': 'timestamp',
'locale': 'locale',
'reason': 'reason',
'error': 'error'
}
def __init__(self, request_id=None, timestamp=None, locale=None, reason=None, error=None):
self.__discriminator_value = "SessionEndedRequest"
self.object_type = self.__discriminator_value
super(SessionEndedRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
self.reason = reason
self.error = error
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, SessionEndedRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f724e0dae8457b34df64dc725e37573bd868d2fc | 1,108 | py | Python | gym-unity/setup.py | alexcercos/ML-Agents | c096c36b0348e3673b687499e17891cd35168939 | [
"Apache-2.0"
] | 1 | 2019-12-29T13:40:16.000Z | 2019-12-29T13:40:16.000Z | gym-unity/setup.py | alexcercos/ML-Agents | c096c36b0348e3673b687499e17891cd35168939 | [
"Apache-2.0"
] | null | null | null | gym-unity/setup.py | alexcercos/ML-Agents | c096c36b0348e3673b687499e17891cd35168939 | [
"Apache-2.0"
] | 2 | 2020-08-16T14:18:16.000Z | 2022-03-18T12:22:54.000Z | #!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.install import install
VERSION = "0.11.0"
class VerifyVersionCommand(install):
    """Release guard: abort the build when the git tag differs from VERSION.

    See https://circleci.com/blog/continuously-deploying-python-packages-to-pypi-with-circleci/
    """

    description = "verify that the git tag matches our version"

    def run(self):
        circle_tag = os.getenv("CIRCLE_TAG")
        if circle_tag == VERSION:
            return
        # mismatched tag: exit non-zero with a descriptive message
        sys.exit(
            "Git tag: {0} does not match the version of this app: {1}".format(circle_tag, VERSION)
        )
# Package registration. `python setup.py verify` runs the VerifyVersionCommand
# defined above; mlagents_envs is pinned to this exact release so the gym
# wrapper and the envs package always ship in lock-step.
setup(
    name="gym_unity",
    version=VERSION,
    description="Unity Machine Learning Agents Gym Interface",
    license="Apache License 2.0",
    author="Unity Technologies",
    author_email="ML-Agents@unity3d.com",
    url="https://github.com/Unity-Technologies/ml-agents",
    packages=find_packages(),
    install_requires=["gym", "mlagents_envs=={}".format(VERSION)],
    cmdclass={"verify": VerifyVersionCommand},
)
| 27.02439 | 95 | 0.666968 |
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.install import install
VERSION = "0.11.0"
class VerifyVersionCommand(install):
description = "verify that the git tag matches our version"
def run(self):
tag = os.getenv("CIRCLE_TAG")
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
setup(
name="gym_unity",
version=VERSION,
description="Unity Machine Learning Agents Gym Interface",
license="Apache License 2.0",
author="Unity Technologies",
author_email="ML-Agents@unity3d.com",
url="https://github.com/Unity-Technologies/ml-agents",
packages=find_packages(),
install_requires=["gym", "mlagents_envs=={}".format(VERSION)],
cmdclass={"verify": VerifyVersionCommand},
)
| true | true |
f724e1095b8e197a2c35d40a6c7744239f4d58e6 | 2,426 | py | Python | webapp/web_app.py | baishalidutta/Comments-Toxicity-Detection | c56cd2eb02983c418cbe91fc4a2a257067cdcb89 | [
"Apache-2.0"
] | 7 | 2021-01-11T05:57:18.000Z | 2022-01-14T21:51:54.000Z | webapp/web_app.py | baishalidutta/Comments-Toxicity-Detection | c56cd2eb02983c418cbe91fc4a2a257067cdcb89 | [
"Apache-2.0"
] | 1 | 2021-04-09T17:00:57.000Z | 2021-04-09T17:00:57.000Z | webapp/web_app.py | baishalidutta/Comments-Toxicity-Detection | c56cd2eb02983c418cbe91fc4a2a257067cdcb89 | [
"Apache-2.0"
] | 1 | 2021-02-20T23:47:26.000Z | 2021-02-20T23:47:26.000Z | __author__ = "Baishali Dutta"
__copyright__ = "Copyright (C) 2021 Baishali Dutta"
__license__ = "Apache License 2.0"
__version__ = "0.1"
# -------------------------------------------------------------------------
# Import Libraries
# -------------------------------------------------------------------------
import pickle
import gradio as gr
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from source.config import *
from source.data_cleaning import clean_text
# -------------------------------------------------------------------------
# Load Existing Model and Tokenizer
# -------------------------------------------------------------------------
# load the trained toxicity-classification model from disk
rnn_model = load_model(MODEL_LOC)

# load the tokenizer fitted during training — it must match the model's vocabulary
with open(TOKENIZER_LOC, 'rb') as handle:
    tokenizer = pickle.load(handle)
# -------------------------------------------------------------------------
# Main Application
# -------------------------------------------------------------------------
def make_prediction(input_comment):
    """Predict the toxicity scores of the specified comment.

    :param input_comment: the comment to be verified
    :return: dict mapping each toxicity label to its predicted score (as str)
    """
    words = clean_text(input_comment).split(" ")

    # tokenize per word, then flatten into one token sequence for the model
    token_lists = tokenizer.texts_to_sequences(words)
    flat_tokens = [token for tokens in token_lists for token in tokens]

    padded = pad_sequences([flat_tokens], maxlen=MAX_SEQUENCE_LENGTH)
    scores = rnn_model.predict(padded, len(padded), verbose=1)[0]

    # one output unit per label, in the order the model was trained with
    labels = ("Toxic", "Very Toxic", "Obscene", "Threat", "Insult", "Hate", "Neutral")
    return {label: str(scores[index]) for index, label in enumerate(labels)}
# Gradio UI: a free-text comment box wired to make_prediction; the "label"
# output component renders the per-category scores returned by the model.
comment = gr.inputs.Textbox(lines=17, placeholder="Enter your comment here")
title = "Comments Toxicity Detection"
description = "This application uses a Bidirectional Long Short-Term Memory (LSTM) Recurrent Neural Network (RNN) " \
              "model to predict the inappropriateness of a comment"

gr.Interface(fn=make_prediction,
             inputs=comment,
             outputs="label",
             title=title,
             description=description) \
    .launch()
| 33.694444 | 117 | 0.528854 | __author__ = "Baishali Dutta"
__copyright__ = "Copyright (C) 2021 Baishali Dutta"
__license__ = "Apache License 2.0"
__version__ = "0.1"
import pickle
import gradio as gr
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from source.config import *
from source.data_cleaning import clean_text
rnn_model = load_model(MODEL_LOC)
with open(TOKENIZER_LOC, 'rb') as handle:
tokenizer = pickle.load(handle)
def make_prediction(input_comment):
input_comment = clean_text(input_comment)
input_comment = input_comment.split(" ")
sequences = tokenizer.texts_to_sequences(input_comment)
sequences = [[item for sublist in sequences for item in sublist]]
padded_data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
result = rnn_model.predict(padded_data, len(padded_data), verbose=1)
return \
{
"Toxic": str(result[0][0]),
"Very Toxic": str(result[0][1]),
"Obscene": str(result[0][2]),
"Threat": str(result[0][3]),
"Insult": str(result[0][4]),
"Hate": str(result[0][5]),
"Neutral": str(result[0][6])
}
comment = gr.inputs.Textbox(lines=17, placeholder="Enter your comment here")
title = "Comments Toxicity Detection"
description = "This application uses a Bidirectional Long Short-Term Memory (LSTM) Recurrent Neural Network (RNN) " \
"model to predict the inappropriateness of a comment"
gr.Interface(fn=make_prediction,
inputs=comment,
outputs="label",
title=title,
description=description) \
.launch()
| true | true |
f724e1cf06b432b67c696656847168d974deac36 | 2,657 | py | Python | test/programytest/parser/template/graph_tests/test_eval.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | test/programytest/parser/template/graph_tests/test_eval.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | test/programytest/parser/template/graph_tests/test_eval.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.word import TemplateWordNode
from programy.parser.template.nodes.get import TemplateGetNode
from programy.parser.template.nodes.eval import TemplateEvalNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphEvalTests(TemplateGraphTestClient):
    """Graph-parsing tests for the <eval> template node."""

    def test_eval_node_from_xml_single_word(self):
        """An <eval> containing a single word parses to a TemplateEvalNode."""
        template = ET.fromstring("""
            <template>
                <eval>Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)

        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)

    def test_eval_node_from_xml_multi_words(self):
        """An <eval> with several words parses each word into a child word node."""
        template = ET.fromstring("""
            <template>
                <eval>Some Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)

        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)
        self.assertEqual(len(node.children), 2)
        self.assertIsInstance(node.children[0], TemplateWordNode)
        self.assertEqual(node.children[0].word, "Some")
        self.assertIsInstance(node.children[1], TemplateWordNode)
        self.assertEqual(node.children[1].word, "Text")

    # BUG FIX: this method was previously also named
    # test_eval_node_from_xml_multi_words, which silently shadowed the test
    # above so the plain multi-word case never executed. Renamed so both run.
    def test_eval_node_from_xml_multi_words_with_get(self):
        """An <eval> mixing words and a <get> child parses all three children."""
        template = ET.fromstring("""
            <template>
                <eval>Some <get name="SomeGet" /> Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)

        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)
        self.assertEqual(len(node.children), 3)
        self.assertIsInstance(node.children[0], TemplateWordNode)
        self.assertEqual(node.children[0].word, "Some")
        self.assertIsInstance(node.children[1], TemplateGetNode)
        self.assertIsInstance(node.children[2], TemplateWordNode)
        self.assertEqual(node.children[2].word, "Text")
| 36.39726 | 94 | 0.705307 | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.word import TemplateWordNode
from programy.parser.template.nodes.get import TemplateGetNode
from programy.parser.template.nodes.eval import TemplateEvalNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphEvalTests(TemplateGraphTestClient):
def test_eval_node_from_xml_single_word(self):
template = ET.fromstring("""
<template>
<eval>Text</eval>
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateEvalNode)
def test_eval_node_from_xml_multi_words(self):
template = ET.fromstring("""
<template>
<eval>Some Text</eval>
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateEvalNode)
self.assertEqual(len(node.children), 2)
self.assertIsInstance(node.children[0], TemplateWordNode)
self.assertEqual(node.children[0].word, "Some")
self.assertIsInstance(node.children[1], TemplateWordNode)
self.assertEqual(node.children[1].word, "Text")
def test_eval_node_from_xml_multi_words(self):
template = ET.fromstring("""
<template>
<eval>Some <get name="SomeGet" /> Text</eval>
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateEvalNode)
self.assertEqual(len(node.children), 3)
self.assertIsInstance(node.children[0], TemplateWordNode)
self.assertEqual(node.children[0].word, "Some")
self.assertIsInstance(node.children[1], TemplateGetNode)
self.assertIsInstance(node.children[2], TemplateWordNode)
self.assertEqual(node.children[2].word, "Text")
| true | true |
f724e27067df0d8b936028ff1d33b38c5cfba530 | 462 | py | Python | djangoapp_cloudedbats_bat_activity/urls.py | cloudedbats/cloudedbats_web_archive | 39e571aa88efd149fd07b4ecc33207af44276c9b | [
"MIT"
] | null | null | null | djangoapp_cloudedbats_bat_activity/urls.py | cloudedbats/cloudedbats_web_archive | 39e571aa88efd149fd07b4ecc33207af44276c9b | [
"MIT"
] | null | null | null | djangoapp_cloudedbats_bat_activity/urls.py | cloudedbats/cloudedbats_web_archive | 39e571aa88efd149fd07b4ecc33207af44276c9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2016 Arnold Andreasson
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
from django.conf.urls import url, include
# import cloudedbats_django.djangoapp_cloudedbats_species.views as species_views
import djangoapp_cloudedbats_bat_activity.views as bat_activity_views
urlpatterns = [
url(r'^', bat_activity_views.bat_activity),
]
| 28.875 | 80 | 0.772727 |
from django.conf.urls import url, include
import djangoapp_cloudedbats_bat_activity.views as bat_activity_views
urlpatterns = [
url(r'^', bat_activity_views.bat_activity),
]
| true | true |
f724e28c80153996114878fb2122ab04143fb7c4 | 5,426 | py | Python | tests/opentracer/core/test_span.py | brettlangdon/dd-trace-py | 95e2641d734669719ca07841de58e233cb0f49e9 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/opentracer/core/test_span.py | brettlangdon/dd-trace-py | 95e2641d734669719ca07841de58e233cb0f49e9 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2021-10-07T02:22:59.000Z | 2021-12-15T02:15:48.000Z | tests/opentracer/core/test_span.py | depop/dd-trace-py | 95e2641d734669719ca07841de58e233cb0f49e9 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-28T06:20:53.000Z | 2020-09-28T06:20:53.000Z | import pytest
from ddtrace.opentracer.span import Span
from tests.utils import DummyTracer
@pytest.fixture
def nop_tracer():
    """An OpenTracing tracer whose Datadog tracer is swapped for a dummy."""
    from ddtrace.opentracer import Tracer

    ot_tracer = Tracer(service_name="mysvc", config={})
    # use the same test tracer as the primary tests so no spans are flushed
    ot_tracer._tracer = DummyTracer()
    return ot_tracer
@pytest.fixture
def nop_span_ctx():
    """A bare span context whose sampling priority keeps the trace."""
    from ddtrace.constants import AUTO_KEEP
    from ddtrace.opentracer.span_context import SpanContext

    ctx = SpanContext(sampling_priority=AUTO_KEEP)
    return ctx
@pytest.fixture
def nop_span(nop_tracer, nop_span_ctx):
    """An unstarted OpenTracing span bound to the dummy tracer."""
    span = Span(nop_tracer, nop_span_ctx, "my_op_name")
    return span
class TestSpan(object):
    """Test the Datadog OpenTracing Span implementation."""

    def test_init(self, nop_tracer, nop_span_ctx):
        """A freshly constructed span is not yet finished."""
        new_span = Span(nop_tracer, nop_span_ctx, "my_op_name")
        assert not new_span.finished

    def test_tags(self, nop_span):
        """A tag that was set can be read back as a metric."""
        nop_span.set_tag("test", 23)
        assert nop_span._get_metric("test") == 23

    def test_set_baggage(self, nop_span):
        """set_baggage_item returns the span itself so calls can be chained."""
        returned = nop_span.set_baggage_item("test", 23)
        assert returned is nop_span

        returned = nop_span.set_baggage_item("1", 1).set_baggage_item("2", 2)
        assert returned is nop_span

    def test_get_baggage(self, nop_span):
        """Baggage written with set_baggage_item is readable via get_baggage_item."""
        # single-item round trip
        nop_span.set_baggage_item("test", 23)
        assert int(nop_span.get_baggage_item("test")) == 23

        # multiple items coexist without clobbering one another
        nop_span.set_baggage_item("1", "1").set_baggage_item("2", 2)
        assert int(nop_span.get_baggage_item("test")) == 23
        assert nop_span.get_baggage_item("1") == "1"
        assert int(nop_span.get_baggage_item("2")) == 2

    def test_log_kv(self, nop_span):
        """Logging arbitrary key/value payloads must not raise."""
        for payload in ({"myval": 2}, {"myval2": 3}, {"myval3": 5}, {"myval": 2}):
            nop_span.log_kv(payload)

    def test_log_dd_kv(self, nop_span):
        """Keys understood by the Datadog impl. land on the right error tags."""
        import traceback

        from ddtrace.ext import errors

        stack_trace = str(traceback.format_stack())
        nop_span.log_kv(
            {
                "event": "error",
                "error": 3,
                "message": "my error message",
                "stack": stack_trace,
            }
        )

        # the underlying Datadog span is flagged as errored...
        assert nop_span._dd_span.error
        # ...and each logged key is stored under its Datadog tag name
        assert nop_span._get_tag(errors.ERROR_STACK) == stack_trace
        assert nop_span._get_tag(errors.ERROR_MSG) == "my error message"
        assert nop_span._get_metric(errors.ERROR_TYPE) == 3

    def test_operation_name(self, nop_span):
        """set_operation_name renames the underlying Datadog span."""
        nop_span.set_operation_name("new_op_name")
        assert nop_span._dd_span.name == "new_op_name"

    def test_context_manager(self, nop_span):
        """Exiting the span's context manager finishes the span."""
        import time

        assert not nop_span.finished

        # the span was never activated on the tracer, so running the context
        # manager produces no traces
        with nop_span:
            time.sleep(0.005)

        assert nop_span.finished

        # no traces were recorded (see the comment above)
        recorded = nop_span.tracer._tracer.pop()
        assert len(recorded) == 0

    def test_immutable_span_context(self, nop_span):
        """Mutating baggage swaps in a brand-new context object."""
        ctx_before = nop_span._context
        nop_span.set_baggage_item("key", "value")
        # the pre-mutation context object must not have been reused
        assert nop_span._context is not ctx_before
class TestSpanCompatibility(object):
    """Ensure our opentracer spans features correspond to datadog span features."""

    def test_set_tag(self, nop_span):
        """A plain numeric tag is stored as a metric on the Datadog span."""
        nop_span.set_tag("test", 2)
        assert nop_span._get_metric("test") == 2

    def test_tag_resource_name(self, nop_span):
        """The OpenTracing 'resource.name' tag maps to the Datadog resource."""
        nop_span.set_tag("resource.name", "myresource")
        assert nop_span._dd_span.resource == "myresource"

    def test_tag_span_type(self, nop_span):
        """The 'span.type' tag maps to the Datadog span type."""
        nop_span.set_tag("span.type", "db")
        assert nop_span._dd_span.span_type == "db"

    def test_tag_service_name(self, nop_span):
        """The 'service.name' tag maps to the Datadog service."""
        nop_span.set_tag("service.name", "mysvc234")
        assert nop_span._dd_span.service == "mysvc234"

    def test_tag_db_statement(self, nop_span):
        """The 'db.statement' tag becomes the Datadog resource (the query text)."""
        nop_span.set_tag("db.statement", "SELECT * FROM USERS")
        assert nop_span._dd_span.resource == "SELECT * FROM USERS"

    def test_tag_peer_hostname(self, nop_span):
        """The 'peer.hostname' tag maps to the Datadog 'out.host' tag."""
        nop_span.set_tag("peer.hostname", "peername")
        assert nop_span._dd_span.get_tag("out.host") == "peername"

    def test_tag_peer_port(self, nop_span):
        """The 'peer.port' tag maps to the Datadog 'out.port' metric."""
        nop_span.set_tag("peer.port", 55555)
        assert nop_span._get_metric("out.port") == 55555

    def test_tag_sampling_priority(self, nop_span):
        """The 'sampling.priority' tag sets the context's sampling priority."""
        nop_span.set_tag("sampling.priority", "2")
        assert nop_span._dd_span.context.sampling_priority == "2"
| 33.9125 | 83 | 0.653336 | import pytest
from ddtrace.opentracer.span import Span
from tests.utils import DummyTracer
@pytest.fixture
def nop_tracer():
from ddtrace.opentracer import Tracer
tracer = Tracer(service_name="mysvc", config={})
tracer._tracer = DummyTracer()
return tracer
@pytest.fixture
def nop_span_ctx():
from ddtrace.constants import AUTO_KEEP
from ddtrace.opentracer.span_context import SpanContext
return SpanContext(sampling_priority=AUTO_KEEP)
@pytest.fixture
def nop_span(nop_tracer, nop_span_ctx):
return Span(nop_tracer, nop_span_ctx, "my_op_name")
class TestSpan(object):
def test_init(self, nop_tracer, nop_span_ctx):
span = Span(nop_tracer, nop_span_ctx, "my_op_name")
assert not span.finished
def test_tags(self, nop_span):
nop_span.set_tag("test", 23)
assert nop_span._get_metric("test") == 23
def test_set_baggage(self, nop_span):
r = nop_span.set_baggage_item("test", 23)
assert r is nop_span
r = nop_span.set_baggage_item("1", 1).set_baggage_item("2", 2)
assert r is nop_span
def test_get_baggage(self, nop_span):
nop_span.set_baggage_item("test", 23)
assert int(nop_span.get_baggage_item("test")) == 23
nop_span.set_baggage_item("1", "1").set_baggage_item("2", 2)
assert int(nop_span.get_baggage_item("test")) == 23
assert nop_span.get_baggage_item("1") == "1"
assert int(nop_span.get_baggage_item("2")) == 2
def test_log_kv(self, nop_span):
nop_span.log_kv({"myval": 2})
nop_span.log_kv({"myval2": 3})
nop_span.log_kv({"myval3": 5})
nop_span.log_kv({"myval": 2})
def test_log_dd_kv(self, nop_span):
import traceback
from ddtrace.ext import errors
stack_trace = str(traceback.format_stack())
nop_span.log_kv(
{
"event": "error",
"error": 3,
"message": "my error message",
"stack": stack_trace,
}
)
assert nop_span._dd_span.error
assert nop_span._get_tag(errors.ERROR_STACK) == stack_trace
assert nop_span._get_tag(errors.ERROR_MSG) == "my error message"
assert nop_span._get_metric(errors.ERROR_TYPE) == 3
def test_operation_name(self, nop_span):
nop_span.set_operation_name("new_op_name")
assert nop_span._dd_span.name == "new_op_name"
def test_context_manager(self, nop_span):
import time
assert not nop_span.finished
with nop_span:
time.sleep(0.005)
assert nop_span.finished
spans = nop_span.tracer._tracer.pop()
assert len(spans) == 0
def test_immutable_span_context(self, nop_span):
before_ctx = nop_span._context
nop_span.set_baggage_item("key", "value")
after_ctx = nop_span._context
assert before_ctx is not after_ctx
class TestSpanCompatibility(object):
def test_set_tag(self, nop_span):
nop_span.set_tag("test", 2)
assert nop_span._get_metric("test") == 2
def test_tag_resource_name(self, nop_span):
nop_span.set_tag("resource.name", "myresource")
assert nop_span._dd_span.resource == "myresource"
def test_tag_span_type(self, nop_span):
nop_span.set_tag("span.type", "db")
assert nop_span._dd_span.span_type == "db"
def test_tag_service_name(self, nop_span):
nop_span.set_tag("service.name", "mysvc234")
assert nop_span._dd_span.service == "mysvc234"
def test_tag_db_statement(self, nop_span):
nop_span.set_tag("db.statement", "SELECT * FROM USERS")
assert nop_span._dd_span.resource == "SELECT * FROM USERS"
def test_tag_peer_hostname(self, nop_span):
nop_span.set_tag("peer.hostname", "peername")
assert nop_span._dd_span.get_tag("out.host") == "peername"
def test_tag_peer_port(self, nop_span):
nop_span.set_tag("peer.port", 55555)
assert nop_span._get_metric("out.port") == 55555
def test_tag_sampling_priority(self, nop_span):
nop_span.set_tag("sampling.priority", "2")
assert nop_span._dd_span.context.sampling_priority == "2"
| true | true |
f724e2e16afd314dfd71391ec47943c9a4b364d9 | 7,749 | py | Python | src/shimoku_api_python/configuration.py | shimoku-tech/shimoku-api-python | de26e7d80631647e68794277b15397403336f252 | [
"MIT"
] | 4 | 2021-12-23T15:51:21.000Z | 2022-01-25T08:55:31.000Z | src/shimoku_api_python/configuration.py | shimoku-tech/shimoku-api-python | de26e7d80631647e68794277b15397403336f252 | [
"MIT"
] | null | null | null | src/shimoku_api_python/configuration.py | shimoku-tech/shimoku-api-python | de26e7d80631647e68794277b15397403336f252 | [
"MIT"
] | 1 | 2022-03-02T01:13:04.000Z | 2022-03-02T01:13:04.000Z | """"""
import copy
import http.client
import logging
import multiprocessing
import sys

import urllib3
class Configuration(object):
    """Runtime configuration for the generated API client.

    NOTE: This class is auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen

    Fixes over the generated code: the original referenced ``six.iteritems``
    and ``httplib`` without importing them, so merely constructing an
    instance raised ``NameError`` (the ``debug``/``logger_file`` setters run
    from ``__init__``).  Replaced with ``dict.values()`` and
    ``http.client``, which are Python 3 native.
    """

    # Optional template instance whose attributes seed new instances.
    _default = None

    def __init__(self):
        """Constructor.

        When a default instance has been registered via :meth:`set_default`,
        copy its attributes instead of building fresh defaults.
        """
        if self._default:
            for key in self._default.__dict__.keys():
                self.__dict__[key] = copy.copy(self._default.__dict__[key])
            return

        # Default Base url
        self.host = "https://server.api.mailchimp.com/3.0"
        # Temp file folder for downloading files
        self.temp_folder_path = None

        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # function to refresh API key if expired
        self.refresh_api_key_hook = None
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""

        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("mailchimp_marketing")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format (must be set before logger_file/debug, whose setters
        # read self.logger_formatter)
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location (property setter installs handlers)
        self.logger_file = None
        # Debug switch (property setter adjusts log levels)
        self.debug = False

        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API
        # from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None
        # Set this to True/False to enable/disable SSL hostname verification.
        self.assert_hostname = None

        # urllib3 connection pool's maximum number of connections saved
        # per pool. urllib3 uses 1 connection as default value, but this is
        # not the best value when you are making a lot of possibly parallel
        # requests to the same host, which is often the case here.
        # cpu_count * 5 is used as default value to increase performance.
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5

        # Proxy URL
        self.proxy = None
        # Safe chars for path_param
        self.safe_chars_for_path_param = ''

    @classmethod
    def set_default(cls, default):
        """Register *default* as the template copied by new instances."""
        cls._default = default

    @property
    def logger_file(self):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            # Fixed: original used six.iteritems() without importing six.
            for logger in self.logger.values():
                logger.addHandler(self.logger_file_handler)
                if self.logger_stream_handler:
                    logger.removeHandler(self.logger_stream_handler)
        else:
            # If not set logging file,
            # then add stream handler and remove file handler.
            self.logger_stream_handler = logging.StreamHandler()
            self.logger_stream_handler.setFormatter(self.logger_formatter)
            for logger in self.logger.values():
                logger.addHandler(self.logger_stream_handler)
                if self.logger_file_handler:
                    logger.removeHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for logger in self.logger.values():
                logger.setLevel(logging.DEBUG)
            # turn on http.client debug (httplib in the original, which is
            # the Python 2 name and was never imported)
            http.client.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for logger in self.logger.values():
                logger.setLevel(logging.WARNING)
            # turn off http.client debug
            http.client.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """Gets API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication, or None if unset.
        """
        if self.refresh_api_key_hook:
            self.refresh_api_key_hook(self)

        key = self.api_key.get(identifier)
        if key:
            prefix = self.api_key_prefix.get(identifier)
            if prefix:
                return "%s %s" % (prefix, key)
            else:
                return key

    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(
            basic_auth=self.username + ':' + self.password
        ).get('authorization')

    def auth_settings(self):
        """Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        return {
            'basicAuth':
                {
                    'type': 'basic',
                    'in': 'header',
                    'key': 'Authorization',
                    'value': self.get_basic_auth_token()
                },
        }

    def to_debug_report(self):
        """Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 3.0.70\n"\
               "SDK Package Version: 3.0.70".\
               format(env=sys.platform, pyversion=sys.version)
| 34.748879 | 80 | 0.599303 |
import copy
import logging
import multiprocessing
import sys
import urllib3
class Configuration(object):
_default = None
def __init__(self):
if self._default:
for key in self._default.__dict__.keys():
self.__dict__[key] = copy.copy(self._default.__dict__[key])
return
self.host = "https://server.api.mailchimp.com/3.0"
self.temp_folder_path = None
self.api_key = {}
self.api_key_prefix = {}
self.refresh_api_key_hook = None
self.username = ""
self.password = ""
self.logger = {}
self.logger["package_logger"] = logging.getLogger("mailchimp_marketing")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
self.logger_stream_handler = None
self.logger_file_handler = None
self.logger_file = None
self.debug = False
self.verify_ssl = True
self.ssl_ca_cert = None
self.cert_file = None
self.key_file = None
self.assert_hostname = None
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@classmethod
def set_default(cls, default):
cls._default = default
@property
def logger_file(self):
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
return self.__debug
@debug.setter
def debug(self, value):
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
if self.refresh_api_key_hook:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
return {
'basicAuth':
{
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
},
}
def to_debug_report(self):
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 3.0.70\n"\
"SDK Package Version: 3.0.70".\
format(env=sys.platform, pyversion=sys.version)
| true | true |
f724e3bba3d8657105215851506d1fc853c91c4f | 96 | py | Python | venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_common.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_common.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_common.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/19/95/b4/79c323071b97ca9595e50046631f61416d54102a34467d3d73b54737d7 | 96 | 96 | 0.895833 | /home/runner/.cache/pip/pool/19/95/b4/79c323071b97ca9595e50046631f61416d54102a34467d3d73b54737d7 | false | true |
f724e4bc2f3b9f7eafff6e16166f0dbe55dce02c | 1,138 | py | Python | send_msg.py | nocholasrift/wordlebot | c97576697c47a1f5a35722af78a6758ba9a325bf | [
"MIT"
] | null | null | null | send_msg.py | nocholasrift/wordlebot | c97576697c47a1f5a35722af78a6758ba9a325bf | [
"MIT"
] | null | null | null | send_msg.py | nocholasrift/wordlebot | c97576697c47a1f5a35722af78a6758ba9a325bf | [
"MIT"
] | null | null | null | import os
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
client = WebClient(token="xoxb-435046985394-3004455722741-fpIQQHskeFNILHcT3hGoPIF7");
channel_id="wordle"
is_solved = True
guesses = []
with open("tmp", "r") as f:
for line in f:
line = line.strip()
if line == "IMPOSSIBLE":
is_solved = False
continue
if line == "DONE":
continue
print(line)
guesses.append(line)
map_ = {'x': ':black_large_square:', 'y': ':large_blue_square:', 'g': ":large_orange_square:"}
text=f'Wordle 220 {len(guesses)}/6\n\n'
for guess in guesses:
for cell in guess:
text+=map_[cell]
text+="\n"
print(guesses)
try:
# Call the conversations.list method using the WebClient
result = client.chat_postMessage(
username="wordlebot",
icon_emoji=":large_green_square",
channel=channel_id,
text=text
# You could also use a blocks[] array to send richer content
)
# Print result, which includes information about the message (like TS)
print(result)
except SlackApiError as e:
print(f"Error: {e}")
| 22.313725 | 95 | 0.655536 | import os
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
client = WebClient(token="xoxb-435046985394-3004455722741-fpIQQHskeFNILHcT3hGoPIF7");
channel_id="wordle"
is_solved = True
guesses = []
with open("tmp", "r") as f:
for line in f:
line = line.strip()
if line == "IMPOSSIBLE":
is_solved = False
continue
if line == "DONE":
continue
print(line)
guesses.append(line)
map_ = {'x': ':black_large_square:', 'y': ':large_blue_square:', 'g': ":large_orange_square:"}
text=f'Wordle 220 {len(guesses)}/6\n\n'
for guess in guesses:
for cell in guess:
text+=map_[cell]
text+="\n"
print(guesses)
try:
result = client.chat_postMessage(
username="wordlebot",
icon_emoji=":large_green_square",
channel=channel_id,
text=text
)
print(result)
except SlackApiError as e:
print(f"Error: {e}")
| true | true |
f724e59cba98bdc89e3b0cc4fb9742fa0ed18df5 | 17,717 | py | Python | vital/bindings/python/vital/tests/test_rotation.py | acidburn0zzz/kwiver | 6e4205f1c46df04759c57c040f01cc804b27e00d | [
"BSD-3-Clause"
] | 1 | 2017-07-31T07:07:32.000Z | 2017-07-31T07:07:32.000Z | vital/bindings/python/vital/tests/test_rotation.py | Acidburn0zzz/kwiver | 6e4205f1c46df04759c57c040f01cc804b27e00d | [
"BSD-3-Clause"
] | 3 | 2021-03-19T15:39:43.000Z | 2021-09-08T02:47:15.000Z | vital/bindings/python/vital/tests/test_rotation.py | acidburn0zzz/kwiver | 6e4205f1c46df04759c57c040f01cc804b27e00d | [
"BSD-3-Clause"
] | null | null | null | """
ckwg +31
Copyright 2016-2017 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for vital.types.Rotation class
"""
import ctypes
import math
import unittest
import nose.tools
import numpy
from vital.types import Rotation
def array_normalize(a, dtype=None):
    """Return *a* as a numpy array (of optional *dtype*) scaled to unit L2 norm."""
    arr = numpy.asarray(a, dtype)
    return arr / numpy.linalg.norm(arr)
class TestVitalRotation (unittest.TestCase):
    """Exercise construction, conversion and arithmetic of vital Rotation.

    Fixed for Python 3: the original used Python-2-only ``print`` statements
    (a SyntaxError under Python 3) and compared against a bare ``map()``
    result, which is a lazy iterator on Python 3.
    """

    def test_new_default(self):
        """Default construction records the requested ctype and spec char."""
        # That these even construct
        rot_d = Rotation(ctypes.c_double)
        nose.tools.assert_equal(rot_d._ctype, ctypes.c_double)
        nose.tools.assert_equal(rot_d._spec, 'd')

        rot_f = Rotation(ctypes.c_float)
        nose.tools.assert_equal(rot_f._ctype, ctypes.c_float)
        nose.tools.assert_equal(rot_f._spec, 'f')

    def test_eq(self):
        """Equality across identical, mixed-type and negated quaternions."""
        # Identities should equal
        r1 = Rotation(ctypes.c_double)
        r2 = Rotation(ctypes.c_double)
        nose.tools.assert_equal(r1, r2)

        r1 = Rotation(ctypes.c_float)
        r2 = Rotation(ctypes.c_float)
        nose.tools.assert_equal(r1, r2)

        r1 = Rotation(ctypes.c_double)
        r2 = Rotation(ctypes.c_float)
        # r2 should get converted into a double instance for checking
        nose.tools.assert_equal(r1, r2)

        r1 = Rotation.from_quaternion([1, 2, 3, 4], ctype=ctypes.c_double)
        r2 = Rotation.from_quaternion([1, 2, 3, 4], ctype=ctypes.c_double)
        nose.tools.assert_equal(r1, r2)

        # q and -q encode the same rotation, so the angle between them is ~0.
        r1 = Rotation.from_quaternion([1, 2, 3, 4], ctype=ctypes.c_double)
        r2 = Rotation.from_quaternion([-1, -2, -3, -4], ctype=ctypes.c_double)
        assert r1.angle_from(r2) < 1e-12

    def test_to_matrix(self):
        """Identity rotation converts to the 3x3 identity matrix."""
        # Default value should be identity
        rot_d = Rotation(ctypes.c_double)
        numpy.testing.assert_array_equal(
            rot_d.matrix(), numpy.eye(3)
        )

        rot_f = Rotation(ctypes.c_float)
        numpy.testing.assert_array_equal(
            rot_f.matrix(), numpy.eye(3)
        )

    def test_to_quaternion(self):
        """Identity rotation converts to the [0, 0, 0, 1] quaternion."""
        rot_d = Rotation(ctypes.c_double)
        numpy.testing.assert_array_equal(rot_d.quaternion(),
                                         [[0],
                                          [0],
                                          [0],
                                          [1]])

        rot_f = Rotation(ctypes.c_float)
        numpy.testing.assert_array_equal(rot_f.quaternion(),
                                         [[0],
                                          [0],
                                          [0],
                                          [1]])

    def test_to_axis_angle(self):
        """Identity rotation reports axis [0,0,1] with angle 0."""
        # expected identity: [0,0,1] and 0
        ident_axis = [[0],
                      [0],
                      [1]]
        ident_angle = 0

        rot_d = Rotation(ctypes.c_double)
        rot_f = Rotation(ctypes.c_float)

        numpy.testing.assert_equal(rot_d.axis(), ident_axis)
        nose.tools.assert_equal(rot_d.angle(), ident_angle)

        numpy.testing.assert_equal(rot_f.axis(), ident_axis)
        nose.tools.assert_equal(rot_f.angle(), ident_angle)

    def test_to_rodrigues(self):
        """Identity rotation has the zero Rodrigues vector."""
        # rodrigues identity: [0,0,0]
        ident_rod = [[0],
                     [0],
                     [0]]

        rot_d = Rotation(ctypes.c_double)
        rot_f = Rotation(ctypes.c_float)

        rod = rot_d.rodrigues()
        numpy.testing.assert_equal(rod, ident_rod)

        rod = rot_f.rodrigues()
        numpy.testing.assert_equal(rod, ident_rod)

    def test_to_ypr(self):
        """Identity rotation converts to the expected yaw/pitch/roll tuple."""
        # ypr identity: (pi/2, 0, pi)
        ident_ypr = (math.pi / 2, 0, -math.pi)
        # list(...) keeps this a concrete sequence: on Python 3, map() is a
        # lazy iterator and would not compare element-wise below.
        ident_ypr_float = list(map(lambda v: ctypes.c_float(v).value,
                                   ident_ypr))

        rot_d = Rotation(ctypes.c_double)
        rot_f = Rotation(ctypes.c_float)

        numpy.testing.assert_equal(
            rot_d.yaw_pitch_roll(),
            ident_ypr
        )

        numpy.testing.assert_equal(
            rot_f.yaw_pitch_roll(),
            ident_ypr_float
        )

    def test_from_quaternion(self):
        """Round-trip a normalized quaternion through the constructor."""
        q = array_normalize([[+2],
                             [-1],
                             [-3],
                             [+0]], float)
        r = Rotation.from_quaternion(q)
        numpy.testing.assert_equal(
            r.quaternion(), q
        )

    def test_from_rodrigues(self):
        """Round-trip Rodrigues vectors through the constructor."""
        rod_list_1 = [[0],
                      [0],
                      [0]]
        r1 = Rotation.from_rodrigues(rod_list_1)
        numpy.testing.assert_equal(r1.rodrigues(), rod_list_1)

        # This one will get normalized by magnitude in rotation instance
        # This vector's is less than 2*pi, so we should expect this vector to
        # be returned as is.
        rod2 = numpy.array([[2],
                            [-1],
                            [0.5]])
        rod2_normed = array_normalize(rod2)
        # print() calls (not statements) for Python 3 compatibility
        print('r2 2-norm:', numpy.linalg.norm(rod2))
        print('r2-normed:', rod2_normed)

        r2 = Rotation.from_rodrigues(rod2)
        numpy.testing.assert_array_almost_equal(
            r2.rodrigues(), rod2,
            decimal=14,  # 1e-14
        )

    def test_from_aa(self):
        """Construct from axis/angle; axis comes back normalized."""
        # Axis should come out of rotation normalized
        angle = 0.8
        axis = numpy.array([[-3],
                            [2],
                            [1]])
        axis_norm = array_normalize(axis)

        r = Rotation.from_axis_angle(axis, angle)
        nose.tools.assert_equal(angle, r.angle())
        numpy.testing.assert_equal(axis_norm, r.axis())

    def test_from_ypr(self):
        """Round-trip every zero/non-zero combination of yaw, pitch, roll."""
        y = 1.2
        p = 0.3
        r = -1.0

        # XXX
        rot = Rotation.from_ypr(y, p, r)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(y, ry, 14)
        nose.tools.assert_almost_equal(p, rp, 14)
        nose.tools.assert_almost_equal(r, rr, 14)

        # 0XX
        rot = Rotation.from_ypr(0, p, r)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(0, ry, 14)
        nose.tools.assert_almost_equal(p, rp, 14)
        nose.tools.assert_almost_equal(r, rr, 14)

        # X0X
        rot = Rotation.from_ypr(y, 0, r)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(y, ry, 14)
        nose.tools.assert_almost_equal(0, rp, 14)
        nose.tools.assert_almost_equal(r, rr, 14)

        # XX0
        rot = Rotation.from_ypr(y, p, 0)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(y, ry, 14)
        nose.tools.assert_almost_equal(p, rp, 14)
        nose.tools.assert_almost_equal(0, rr, 14)

        # 00X
        rot = Rotation.from_ypr(0, 0, r)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(0, ry, 14)
        nose.tools.assert_almost_equal(0, rp, 14)
        nose.tools.assert_almost_equal(r, rr, 14)

        # 0X0
        rot = Rotation.from_ypr(0, p, 0)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(0, ry, 14)
        nose.tools.assert_almost_equal(p, rp, 14)
        nose.tools.assert_almost_equal(0, rr, 14)

        # X00
        rot = Rotation.from_ypr(y, 0, 0)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(y, ry, 14)
        nose.tools.assert_almost_equal(0, rp, 14)
        nose.tools.assert_almost_equal(0, rr, 14)

        # 000
        rot = Rotation.from_ypr(0, 0, 0)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(0, ry, 14)
        nose.tools.assert_almost_equal(0, rp, 14)
        nose.tools.assert_almost_equal(0, rr, 14)

    def test_from_matrix(self):
        """A rotation rebuilt from a matrix reproduces that matrix."""
        # Create a non-identity matrix from a different constructor that we
        # assume works
        # Create new rotation with that matrix.
        # New rotation to_matrix method should produce the same matrix
        pre_r = Rotation.from_quaternion([[+2],
                                          [-1],
                                          [-3],
                                          [+0]])
        mat = pre_r.matrix()
        r = Rotation.from_matrix(mat)
        numpy.testing.assert_allclose(mat, r.matrix(), 1e-15)

    def test_inverse(self):
        """Inverse quaternion matches the analytically computed conjugate."""
        # quaternion calc from:
        # https://www.wolframalpha.com/input/?i=quaternion:+0%2B2i-j-3k&lk=3
        r = Rotation.from_quaternion([[+2],
                                      [-1],
                                      [-3],
                                      [+0]], ctype=ctypes.c_double)
        r_inv = r.inverse()
        e_inv = array_normalize([[-1 / 7.],
                                 [+1 / 14.],
                                 [+3 / 14.],
                                 [0]])
        numpy.testing.assert_allclose(
            r_inv.quaternion(),
            e_inv,
            1e-15
        )

        r = Rotation.from_quaternion([[+2],
                                      [-1],
                                      [-3],
                                      [+0]], ctype=ctypes.c_float)
        r_inv = r.inverse()
        numpy.testing.assert_allclose(
            r_inv.quaternion(),
            e_inv,
            1e-7
        )

    def test_compose(self):
        """Composition with identity, via compose() and the * operator."""
        # Normalize quaternaion vector.
        expected_quat = array_normalize([[+2.],
                                         [-1.],
                                         [-3.],
                                         [+0.]])

        r_ident_d = Rotation(ctypes.c_double)
        r_ident_f = Rotation(ctypes.c_float)
        r_other_d = Rotation.from_quaternion(expected_quat, ctypes.c_double)
        r_other_f = Rotation.from_quaternion(expected_quat, ctypes.c_float)

        r_res_d = r_ident_d.compose(r_other_d)
        nose.tools.assert_is_not(r_other_d, r_res_d)
        numpy.testing.assert_equal(r_res_d, r_other_d)
        numpy.testing.assert_equal(r_res_d.quaternion(), expected_quat)

        r_res_f = r_ident_f.compose(r_other_f)
        nose.tools.assert_is_not(r_other_f, r_res_f)
        numpy.testing.assert_equal(r_res_f, r_other_f)
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
                                      1e-7)

        # Should also work with multiply operator
        r_res_d = r_ident_d * r_other_d
        nose.tools.assert_is_not(r_other_d, r_res_d)
        numpy.testing.assert_equal(r_res_d, r_other_d)
        numpy.testing.assert_equal(r_res_d.quaternion(), expected_quat)

        r_res_f = r_ident_f * r_other_f
        nose.tools.assert_is_not(r_other_f, r_res_f)
        numpy.testing.assert_equal(r_res_f, r_other_f)
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
                                      1e-7)

        # Rotation of non-congruent types should be converted automatically
        r_res_d = r_ident_d.compose(r_other_f)
        nose.tools.assert_is_not(r_res_d, r_other_f)
        numpy.testing.assert_allclose(r_res_d.quaternion(),
                                      r_other_f.quaternion(),
                                      1e-7)
        numpy.testing.assert_allclose(r_res_d.quaternion(), expected_quat,
                                      1e-7)

        r_res_f = r_ident_f.compose(r_other_d)
        nose.tools.assert_is_not(r_res_f, r_other_f)
        numpy.testing.assert_allclose(r_res_f.quaternion(),
                                      r_other_f.quaternion(),
                                      1e-7)
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
                                      1e-7)

        # Equality check between types should pass due to integrety resolution
        # inside function.
        r_res_d = r_ident_d * r_other_f
        nose.tools.assert_is_not(r_res_d, r_other_f)
        numpy.testing.assert_allclose(r_res_d.quaternion(),
                                      r_other_f.quaternion(),
                                      1e-7)
        numpy.testing.assert_allclose(r_res_d.quaternion(), expected_quat,
                                      1e-7)

        r_res_f = r_ident_f * r_other_d
        nose.tools.assert_is_not(r_res_f, r_other_f)
        numpy.testing.assert_equal(r_res_f.quaternion(),
                                   r_other_f.quaternion())
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
                                      1e-7)

    def test_rotation_vector(self):
        """Rotating a unit x vector 90 degrees about z yields the y vector."""
        vec = [[1],
               [0],
               [0]]
        vec_expected = [[0],
                        [1],
                        [0]]

        r_axis = [[0],
                  [0],
                  [1]]
        r_angle = math.pi / 2.
        r = Rotation.from_axis_angle(r_axis, r_angle)
        vec_rotated = r.rotate_vector(vec)
        numpy.testing.assert_array_almost_equal(vec_expected, vec_rotated)

        # should be able to multiply a rotation as a left-hand side arg with a
        # 3x1 vector as the right-hand side arg
        vec_rotated = r * vec
        numpy.testing.assert_array_almost_equal(vec_expected, vec_rotated)

    def test_interpolation(self):
        """Midpoint interpolation lands halfway between two rotations."""
        x_d = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_double)
        y_d = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 2,
                                       ctypes.c_double)
        r_d = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 4,
                                       ctypes.c_double)

        x_f = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_float)
        y_f = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 2,
                                       ctypes.c_float)
        r_f = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 4,
                                       ctypes.c_float)

        z_d = Rotation.interpolate(x_d, y_d, 0.5)
        z_f = Rotation.interpolate(x_f, y_f, 0.5)
        nose.tools.assert_almost_equal((z_d.inverse() * r_d).angle(), 0, 14)
        nose.tools.assert_almost_equal((z_f.inverse() * r_f).angle(), 0, 6)

        # Should auto-convert different y-type into x's type for computation.
        # Return should be of x's type.
        z_d = Rotation.interpolate(x_d, y_f, 0.5)
        nose.tools.assert_is(z_d._ctype, x_d._ctype)
        nose.tools.assert_is_not(z_d._ctype, y_f._ctype)
        nose.tools.assert_almost_equal((z_d.inverse() * r_d).angle(), 0, 14)

        z_f = Rotation.interpolate(x_f, y_d, 0.5)
        nose.tools.assert_is(z_f._ctype, x_f._ctype)
        nose.tools.assert_is_not(z_f._ctype, y_d._ctype)
        nose.tools.assert_almost_equal((z_f.inverse() * r_f).angle(), 0, 6)

    def test_interpolated_rotations(self):
        """Three interpolated rotations fall at 1/4, 1/2 and 3/4 of the arc."""
        x = Rotation.from_axis_angle([[1], [0], [0]], 0)
        a = math.pi / 2
        y = Rotation.from_axis_angle([[0], [1], [0]], a)
        i_list = Rotation.interpolated_rotations(x, y, 3)
        nose.tools.assert_equal([i._ctype for i in i_list],
                                [ctypes.c_double] * 3)

        i0_e_axis, i0_e_angle = [[0], [1], [0]], a * 0.25
        i1_e_axis, i1_e_angle = [[0], [1], [0]], a * 0.50
        i2_e_axis, i2_e_angle = [[0], [1], [0]], a * 0.75

        numpy.testing.assert_almost_equal(i_list[0].axis(), i0_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[0].angle(), i0_e_angle, 14)
        numpy.testing.assert_almost_equal(i_list[1].axis(), i1_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[1].angle(), i1_e_angle, 14)
        numpy.testing.assert_almost_equal(i_list[2].axis(), i2_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[2].angle(), i2_e_angle, 14)

        # Mixed types: result takes x's (float) ctype, so looser tolerance on
        # the angles.
        a = math.pi / 2
        x = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_float)
        y = Rotation.from_axis_angle([[0], [1], [0]], a)
        i_list = Rotation.interpolated_rotations(x, y, 3)
        nose.tools.assert_equal([i._ctype for i in i_list],
                                [ctypes.c_float] * 3)
        numpy.testing.assert_almost_equal(i_list[0].axis(), i0_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[0].angle(), i0_e_angle, 6)
        numpy.testing.assert_almost_equal(i_list[1].axis(), i1_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[1].angle(), i1_e_angle, 6)
        numpy.testing.assert_almost_equal(i_list[2].axis(), i2_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[2].angle(), i2_e_angle, 6)
| 38.101075 | 85 | 0.570469 | """
ckwg +31
Copyright 2016-2017 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for vital.types.Rotation class
"""
import ctypes
import math
import unittest
import nose.tools
import numpy
from vital.types import Rotation
def array_normalize(a, dtype=None):
    """Return *a* as a numpy array scaled to unit Euclidean (L2) length."""
    arr = numpy.asarray(a, dtype)
    norm = numpy.linalg.norm(arr)
    return arr / norm
class TestVitalRotation (unittest.TestCase):
    """Unit tests for the vital.types.Rotation Python bindings.

    Fixed here: Python-2-only ``print`` statements (a SyntaxError on
    Python 3) and a bare ``map()`` object that is lazy on Python 3 and
    would not compare correctly.
    """

    def test_new_default(self):
        """Default-constructed rotations record their ctype and format spec."""
        rot_d = Rotation(ctypes.c_double)
        nose.tools.assert_equal(rot_d._ctype, ctypes.c_double)
        nose.tools.assert_equal(rot_d._spec, 'd')
        rot_f = Rotation(ctypes.c_float)
        nose.tools.assert_equal(rot_f._ctype, ctypes.c_float)
        nose.tools.assert_equal(rot_f._spec, 'f')

    def test_eq(self):
        """Equality holds across ctypes and for negated quaternions."""
        r1 = Rotation(ctypes.c_double)
        r2 = Rotation(ctypes.c_double)
        nose.tools.assert_equal(r1, r2)
        r1 = Rotation(ctypes.c_float)
        r2 = Rotation(ctypes.c_float)
        nose.tools.assert_equal(r1, r2)
        r1 = Rotation(ctypes.c_double)
        r2 = Rotation(ctypes.c_float)
        nose.tools.assert_equal(r1, r2)
        r1 = Rotation.from_quaternion([1, 2, 3, 4], ctype=ctypes.c_double)
        r2 = Rotation.from_quaternion([1, 2, 3, 4], ctype=ctypes.c_double)
        nose.tools.assert_equal(r1, r2)
        # q and -q represent the same rotation, so the angle between them is ~0.
        r1 = Rotation.from_quaternion([1, 2, 3, 4], ctype=ctypes.c_double)
        r2 = Rotation.from_quaternion([-1, -2, -3, -4], ctype=ctypes.c_double)
        assert r1.angle_from(r2) < 1e-12

    def test_to_matrix(self):
        """Identity rotations convert to the 3x3 identity matrix."""
        rot_d = Rotation(ctypes.c_double)
        numpy.testing.assert_array_equal(
            rot_d.matrix(), numpy.eye(3)
        )
        rot_f = Rotation(ctypes.c_float)
        numpy.testing.assert_array_equal(
            rot_f.matrix(), numpy.eye(3)
        )

    def test_to_quaternion(self):
        """Identity rotations convert to the identity quaternion [0,0,0,1]."""
        rot_d = Rotation(ctypes.c_double)
        numpy.testing.assert_array_equal(rot_d.quaternion(),
                                         [[0],
                                          [0],
                                          [0],
                                          [1]])
        rot_f = Rotation(ctypes.c_float)
        numpy.testing.assert_array_equal(rot_f.quaternion(),
                                         [[0],
                                          [0],
                                          [0],
                                          [1]])

    def test_to_axis_angle(self):
        """Identity rotations report the canonical axis [0,0,1] and angle 0."""
        ident_axis = [[0],
                      [0],
                      [1]]
        ident_angle = 0
        rot_d = Rotation(ctypes.c_double)
        rot_f = Rotation(ctypes.c_float)
        numpy.testing.assert_equal(rot_d.axis(), ident_axis)
        nose.tools.assert_equal(rot_d.angle(), ident_angle)
        numpy.testing.assert_equal(rot_f.axis(), ident_axis)
        nose.tools.assert_equal(rot_f.angle(), ident_angle)

    def test_to_rodrigues(self):
        """Identity rotations convert to the zero Rodrigues vector."""
        ident_rod = [[0],
                     [0],
                     [0]]
        rot_d = Rotation(ctypes.c_double)
        rot_f = Rotation(ctypes.c_float)
        rod = rot_d.rodrigues()
        numpy.testing.assert_equal(rod, ident_rod)
        rod = rot_f.rodrigues()
        numpy.testing.assert_equal(rod, ident_rod)

    def test_to_ypr(self):
        """Identity rotations report the expected yaw/pitch/roll triple."""
        # NOTE(review): (pi/2, 0, -pi) reflects vital's YPR convention for the
        # identity rotation -- confirm against the C++ implementation.
        ident_ypr = (math.pi / 2, 0, -math.pi)
        # list(...) is required: on Python 3 map() is lazy, and the bare map
        # object would not compare element-wise against the returned triple.
        ident_ypr_float = list(map(lambda v: ctypes.c_float(v).value, ident_ypr))
        rot_d = Rotation(ctypes.c_double)
        rot_f = Rotation(ctypes.c_float)
        numpy.testing.assert_equal(
            rot_d.yaw_pitch_roll(),
            ident_ypr
        )
        numpy.testing.assert_equal(
            rot_f.yaw_pitch_roll(),
            ident_ypr_float
        )

    def test_from_quaternion(self):
        """Rotations built from a unit quaternion return the same quaternion."""
        q = array_normalize([[+2],
                             [-1],
                             [-3],
                             [+0]], float)
        r = Rotation.from_quaternion(q)
        numpy.testing.assert_equal(
            r.quaternion(), q
        )

    def test_from_rodrigues(self):
        """Rotations round-trip through their Rodrigues representation."""
        rod_list_1 = [[0],
                      [0],
                      [0]]
        r1 = Rotation.from_rodrigues(rod_list_1)
        numpy.testing.assert_equal(r1.rodrigues(), rod_list_1)
        # A Rodrigues vector's norm encodes the angle, so it should be
        # returned as is.
        rod2 = numpy.array([[2],
                            [-1],
                            [0.5]])
        rod2_normed = array_normalize(rod2)
        # The Python-2 print statements were a SyntaxError on Python 3.
        print('r2 2-norm: {}'.format(numpy.linalg.norm(rod2)))
        print('r2-normed: {}'.format(rod2_normed))
        r2 = Rotation.from_rodrigues(rod2)
        numpy.testing.assert_array_almost_equal(
            r2.rodrigues(), rod2,
            decimal=14,  # 1e-14
        )

    def test_from_aa(self):
        """from_axis_angle keeps the angle and normalizes the axis."""
        # Axis should come out of rotation normalized
        angle = 0.8
        axis = numpy.array([[-3],
                            [2],
                            [1]])
        axis_norm = array_normalize(axis)
        r = Rotation.from_axis_angle(axis, angle)
        nose.tools.assert_equal(angle, r.angle())
        numpy.testing.assert_equal(axis_norm, r.axis())

    def test_from_ypr(self):
        """Yaw/pitch/roll round-trips for every zero/non-zero combination."""
        y = 1.2
        p = 0.3
        r = -1.0
        # XXX
        rot = Rotation.from_ypr(y, p, r)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(y, ry, 14)
        nose.tools.assert_almost_equal(p, rp, 14)
        nose.tools.assert_almost_equal(r, rr, 14)
        # 0XX
        rot = Rotation.from_ypr(0, p, r)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(0, ry, 14)
        nose.tools.assert_almost_equal(p, rp, 14)
        nose.tools.assert_almost_equal(r, rr, 14)
        # X0X
        rot = Rotation.from_ypr(y, 0, r)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(y, ry, 14)
        nose.tools.assert_almost_equal(0, rp, 14)
        nose.tools.assert_almost_equal(r, rr, 14)
        # XX0
        rot = Rotation.from_ypr(y, p, 0)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(y, ry, 14)
        nose.tools.assert_almost_equal(p, rp, 14)
        nose.tools.assert_almost_equal(0, rr, 14)
        # 00X
        rot = Rotation.from_ypr(0, 0, r)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(0, ry, 14)
        nose.tools.assert_almost_equal(0, rp, 14)
        nose.tools.assert_almost_equal(r, rr, 14)
        # 0X0
        rot = Rotation.from_ypr(0, p, 0)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(0, ry, 14)
        nose.tools.assert_almost_equal(p, rp, 14)
        nose.tools.assert_almost_equal(0, rr, 14)
        # X00
        rot = Rotation.from_ypr(y, 0, 0)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(y, ry, 14)
        nose.tools.assert_almost_equal(0, rp, 14)
        nose.tools.assert_almost_equal(0, rr, 14)
        # 000
        rot = Rotation.from_ypr(0, 0, 0)
        ry, rp, rr = rot.yaw_pitch_roll()
        nose.tools.assert_almost_equal(0, ry, 14)
        nose.tools.assert_almost_equal(0, rp, 14)
        nose.tools.assert_almost_equal(0, rr, 14)

    def test_from_matrix(self):
        """Rotation matrices round-trip through from_matrix()/matrix()."""
        # Create a non-identity matrix from a different constructor that we
        # assume works
        # Create new rotation with that matrix.
        # New rotation to_matrix method should produce the same matrix
        pre_r = Rotation.from_quaternion([[+2],
                                          [-1],
                                          [-3],
                                          [+0]])
        mat = pre_r.matrix()
        r = Rotation.from_matrix(mat)
        numpy.testing.assert_allclose(mat, r.matrix(), 1e-15)

    def test_inverse(self):
        """inverse() yields the normalized conjugate quaternion."""
        # quaternion calc from:
        # https://www.wolframalpha.com/input/?i=quaternion:+0%2B2i-j-3k&lk=3
        r = Rotation.from_quaternion([[+2],
                                      [-1],
                                      [-3],
                                      [+0]], ctype=ctypes.c_double)
        r_inv = r.inverse()
        e_inv = array_normalize([[-1 / 7.],
                                 [+1 / 14.],
                                 [+3 / 14.],
                                 [0]])
        numpy.testing.assert_allclose(
            r_inv.quaternion(),
            e_inv,
            1e-15
        )
        r = Rotation.from_quaternion([[+2],
                                      [-1],
                                      [-3],
                                      [+0]], ctype=ctypes.c_float)
        r_inv = r.inverse()
        numpy.testing.assert_allclose(
            r_inv.quaternion(),
            e_inv,
            1e-7
        )

    def test_compose(self):
        """compose() and * chain rotations and coerce mismatched ctypes."""
        # Normalize quaternion vector.
        expected_quat = array_normalize([[+2.],
                                         [-1.],
                                         [-3.],
                                         [+0.]])
        r_ident_d = Rotation(ctypes.c_double)
        r_ident_f = Rotation(ctypes.c_float)
        r_other_d = Rotation.from_quaternion(expected_quat, ctypes.c_double)
        r_other_f = Rotation.from_quaternion(expected_quat, ctypes.c_float)
        r_res_d = r_ident_d.compose(r_other_d)
        nose.tools.assert_is_not(r_other_d, r_res_d)
        numpy.testing.assert_equal(r_res_d, r_other_d)
        numpy.testing.assert_equal(r_res_d.quaternion(), expected_quat)
        r_res_f = r_ident_f.compose(r_other_f)
        nose.tools.assert_is_not(r_other_f, r_res_f)
        numpy.testing.assert_equal(r_res_f, r_other_f)
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
                                      1e-7)
        # Should also work with multiply operator
        r_res_d = r_ident_d * r_other_d
        nose.tools.assert_is_not(r_other_d, r_res_d)
        numpy.testing.assert_equal(r_res_d, r_other_d)
        numpy.testing.assert_equal(r_res_d.quaternion(), expected_quat)
        r_res_f = r_ident_f * r_other_f
        nose.tools.assert_is_not(r_other_f, r_res_f)
        numpy.testing.assert_equal(r_res_f, r_other_f)
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
                                      1e-7)
        # Rotation of non-congruent types should be converted automatically
        r_res_d = r_ident_d.compose(r_other_f)
        nose.tools.assert_is_not(r_res_d, r_other_f)
        numpy.testing.assert_allclose(r_res_d.quaternion(),
                                      r_other_f.quaternion(),
                                      1e-7)
        numpy.testing.assert_allclose(r_res_d.quaternion(), expected_quat,
                                      1e-7)
        r_res_f = r_ident_f.compose(r_other_d)
        nose.tools.assert_is_not(r_res_f, r_other_f)
        numpy.testing.assert_allclose(r_res_f.quaternion(),
                                      r_other_f.quaternion(),
                                      1e-7)
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
                                      1e-7)
        # Equality check between types should pass due to type resolution
        # inside the function.
        r_res_d = r_ident_d * r_other_f
        nose.tools.assert_is_not(r_res_d, r_other_f)
        numpy.testing.assert_allclose(r_res_d.quaternion(),
                                      r_other_f.quaternion(),
                                      1e-7)
        numpy.testing.assert_allclose(r_res_d.quaternion(), expected_quat,
                                      1e-7)
        r_res_f = r_ident_f * r_other_d
        nose.tools.assert_is_not(r_res_f, r_other_f)
        numpy.testing.assert_equal(r_res_f.quaternion(),
                                   r_other_f.quaternion())
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat,
                                      1e-7)

    def test_rotation_vector(self):
        """A 90-degree z rotation maps the x unit vector onto y."""
        vec = [[1],
               [0],
               [0]]
        vec_expected = [[0],
                        [1],
                        [0]]
        r_axis = [[0],
                  [0],
                  [1]]
        r_angle = math.pi / 2.
        r = Rotation.from_axis_angle(r_axis, r_angle)
        vec_rotated = r.rotate_vector(vec)
        numpy.testing.assert_array_almost_equal(vec_expected, vec_rotated)
        # should be able to multiply a rotation as a left-hand side arg with a
        # 3x1 vector as the right-hand side arg
        vec_rotated = r * vec
        numpy.testing.assert_array_almost_equal(vec_expected, vec_rotated)

    def test_interpolation(self):
        """Midpoint interpolation between 0 and pi/2 yields a pi/4 rotation."""
        x_d = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_double)
        y_d = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 2, ctypes.c_double)
        r_d = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 4, ctypes.c_double)
        x_f = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_float)
        y_f = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 2, ctypes.c_float)
        r_f = Rotation.from_axis_angle([[0], [1], [0]], math.pi / 4, ctypes.c_float)
        z_d = Rotation.interpolate(x_d, y_d, 0.5)
        z_f = Rotation.interpolate(x_f, y_f, 0.5)
        nose.tools.assert_almost_equal((z_d.inverse() * r_d).angle(), 0, 14)
        nose.tools.assert_almost_equal((z_f.inverse() * r_f).angle(), 0, 6)
        # Should auto-convert different y-type into x's type for computation.
        z_d = Rotation.interpolate(x_d, y_f, 0.5)
        nose.tools.assert_is(z_d._ctype, x_d._ctype)
        nose.tools.assert_is_not(z_d._ctype, y_f._ctype)
        nose.tools.assert_almost_equal((z_d.inverse() * r_d).angle(), 0, 14)
        z_f = Rotation.interpolate(x_f, y_d, 0.5)
        nose.tools.assert_is(z_f._ctype, x_f._ctype)
        nose.tools.assert_is_not(z_f._ctype, y_d._ctype)
        nose.tools.assert_almost_equal((z_f.inverse() * r_f).angle(), 0, 6)

    def test_interpolated_rotations(self):
        """interpolated_rotations yields evenly spaced intermediate rotations."""
        x = Rotation.from_axis_angle([[1], [0], [0]], 0)
        a = math.pi / 2
        y = Rotation.from_axis_angle([[0], [1], [0]], a)
        i_list = Rotation.interpolated_rotations(x, y, 3)
        nose.tools.assert_equal([i._ctype for i in i_list],
                                [ctypes.c_double] * 3)
        i0_e_axis, i0_e_angle = [[0], [1], [0]], a * 0.25
        i1_e_axis, i1_e_angle = [[0], [1], [0]], a * 0.50
        i2_e_axis, i2_e_angle = [[0], [1], [0]], a * 0.75
        numpy.testing.assert_almost_equal(i_list[0].axis(), i0_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[0].angle(), i0_e_angle, 14)
        numpy.testing.assert_almost_equal(i_list[1].axis(), i1_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[1].angle(), i1_e_angle, 14)
        numpy.testing.assert_almost_equal(i_list[2].axis(), i2_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[2].angle(), i2_e_angle, 14)
        # Mixed types: the result takes the first argument's ctype.
        a = math.pi / 2
        x = Rotation.from_axis_angle([[1], [0], [0]], 0, ctypes.c_float)
        y = Rotation.from_axis_angle([[0], [1], [0]], a)
        i_list = Rotation.interpolated_rotations(x, y, 3)
        nose.tools.assert_equal([i._ctype for i in i_list],
                                [ctypes.c_float] * 3)
        numpy.testing.assert_almost_equal(i_list[0].axis(), i0_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[0].angle(), i0_e_angle, 6)
        numpy.testing.assert_almost_equal(i_list[1].axis(), i1_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[1].angle(), i1_e_angle, 6)
        numpy.testing.assert_almost_equal(i_list[2].axis(), i2_e_axis, 14)
        numpy.testing.assert_almost_equal(i_list[2].angle(), i2_e_angle, 6)
| false | true |
f724e6dae565bcc5a26d05bdb7f2553473458a1f | 1,242 | py | Python | tests/test_project_setups.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 3 | 2020-04-22T04:09:18.000Z | 2021-12-20T08:44:44.000Z | tests/test_project_setups.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 11 | 2019-08-31T08:37:40.000Z | 2019-08-31T11:25:29.000Z | tests/test_project_setups.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 1 | 2020-11-24T11:18:50.000Z | 2020-11-24T11:18:50.000Z | import pytest
"""
This file includes several configurations of answers for the setup file.
Each configuration should complete without errors to pass these tests.
"""
@pytest.mark.skip
def test_all_python_versions_deploy():
    """Test that setup.py is formatted correctly when all Python versions are supported."""
    pass
@pytest.mark.skip
def test_2x_only_python_version_deploy():
    """Test that setup.py is formatted correctly for Python 2.7-only support."""
    pass
@pytest.mark.skip
def test_3x_only_python_versions_deploy():
    """Test that setup.py is formatted correctly when all Python 3.x versions are supported."""
    pass
@pytest.mark.skip
def test_markdown_documentation():
    """Test that the Markdown documentation option is deployed correctly."""
    pass
@pytest.mark.skip
def test_rst_documentation():
    """Test that the reStructuredText documentation option is deployed correctly."""
    pass
@pytest.mark.skip
def test_install_github_issues_templates():
    """Test that GitHub issue templates are installed."""
    pass
@pytest.mark.skip
def test_install_gitlab_issues_templates():
    """Test that GitLab issue templates are installed."""
    pass
@pytest.mark.skip
def test_mit_license_deploy():
    """Test that the MIT license option is deployed."""
    pass
@pytest.mark.skip
def test_bsd_license_deploy():
    """Test that the BSD license option is deployed."""
    pass
@pytest.mark.skip
def test_gnu_license_deploy():
    """Test that the GNU license option is deployed."""
    pass
@pytest.mark.skip
def test_apache_license_deploy():
    """Test that the Apache license option is deployed."""
    pass
@pytest.mark.skip
def test_unlicensed_license_deploy():
    """Test that the 'unlicensed' license option is deployed."""
    pass
@pytest.mark.skip
def test_none_license_deploy():
    """Test that the 'no license' option is deployed."""
    pass
| 16.342105 | 77 | 0.750403 | import pytest
@pytest.mark.skip
def test_all_python_versions_deploy():
    """Test that setup.py is formatted correctly when all Python versions are supported."""
    pass
@pytest.mark.skip
def test_2x_only_python_version_deploy():
    """Test that setup.py is formatted correctly for Python 2.7-only support."""
    pass
@pytest.mark.skip
def test_3x_only_python_versions_deploy():
    """Test that setup.py is formatted correctly when all Python 3.x versions are supported."""
    pass
@pytest.mark.skip
def test_markdown_documentation():
    """Test that the Markdown documentation option is deployed correctly."""
    pass
@pytest.mark.skip
def test_rst_documentation():
    """Test that the reStructuredText documentation option is deployed correctly."""
    pass
@pytest.mark.skip
def test_install_github_issues_templates():
    """Test that GitHub issue templates are installed."""
    pass
@pytest.mark.skip
def test_install_gitlab_issues_templates():
    """Test that GitLab issue templates are installed."""
    pass
@pytest.mark.skip
def test_mit_license_deploy():
    """Test that the MIT license option is deployed."""
    pass
@pytest.mark.skip
def test_bsd_license_deploy():
    """Test that the BSD license option is deployed."""
    pass
@pytest.mark.skip
def test_gnu_license_deploy():
    """Test that the GNU license option is deployed."""
    pass
@pytest.mark.skip
def test_apache_license_deploy():
    """Test that the Apache license option is deployed."""
    pass
@pytest.mark.skip
def test_unlicensed_license_deploy():
    """Test that the 'unlicensed' license option is deployed."""
    pass
@pytest.mark.skip
def test_none_license_deploy():
    """Test that the 'no license' option is deployed."""
    pass
| true | true |
f724e874d78e8be3faac5983bcecca02a0597e59 | 4,291 | py | Python | benchmark/startQiskit_QC2348.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC2348.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC2348.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=36
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings character-wise.

    The result is joined in reversed (last-bit-first) order.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Return the dot product of two bit strings modulo 2, as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f: |x>|y> -> |x>|y XOR f(x)>.

    For every n-bit pattern whose f() value is "1", the controls are wrapped
    in X gates so a multi-controlled Toffoli fires exactly on that pattern.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X-sandwich converts the MCT's "all ones" condition into "== rep"
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the benchmark circuit around an oracle for boolean f.

    The gate sequence is auto-generated (the trailing ``# number=NN``
    comments are provenance tags from the generator) and its order is
    significant -- do not reorder gates.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=15
    prog.cz(input_qubit[0],input_qubit[3]) # number=16
    prog.h(input_qubit[3]) # number=17
    prog.x(input_qubit[3]) # number=13
    prog.h(input_qubit[3]) # number=20
    prog.cz(input_qubit[0],input_qubit[3]) # number=21
    prog.h(input_qubit[3]) # number=22
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    prog.cx(input_qubit[0],input_qubit[3]) # number=33
    prog.x(input_qubit[3]) # number=34
    prog.cx(input_qubit[0],input_qubit[3]) # number=35
    # Oracle over the first n-1 qubits, targeting the last one.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[1]) # number=29
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.h(input_qubit[0]) # number=23
    prog.cz(input_qubit[2],input_qubit[0]) # number=24
    prog.h(input_qubit[0]) # number=25
    prog.y(input_qubit[2]) # number=30
    prog.cx(input_qubit[2],input_qubit[0]) # number=11
    prog.cx(input_qubit[2],input_qubit[0]) # number=18
    prog.h(input_qubit[0]) # number=26
    prog.x(input_qubit[2]) # number=31
    prog.cz(input_qubit[2],input_qubit[0]) # number=27
    prog.h(input_qubit[0]) # number=28
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden bitstring and offset; f(rep) = (a . rep) XOR b, which appears to
    # be a Bernstein-Vazirani-style oracle function.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the least busy operational real IBMQ device with >= 2 qubits
    # (requires stored IBMQ credentials; this performs network I/O).
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mock backend only to report circuit statistics.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_QC2348.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 35.172131 | 165 | 0.655558 |
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR s and t bit-by-bit; the output string is reversed."""
    out = ''
    for i in range(len(s)):
        # Prepending builds the reversed result directly.
        out = str(int(s[i]) ^ int(t[i])) + out
    return out
def bitwise_dot(s: str, t: str) -> str:
    """Mod-2 dot product of the bit strings s and t, returned as a string."""
    acc = 0
    for i in range(len(s)):
        acc += int(s[i]) * int(t[i])
    return str(acc % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f that flips the target qubit whenever f(x) == "1"."""
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) != "1":
            continue
        # X-sandwich: make the multi-controlled Toffoli fire exactly on `rep`.
        zero_positions = [j for j in range(n) if rep[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the benchmark circuit around an oracle for boolean f.

    The gate list is machine-generated and order-sensitive; do not reorder.
    """
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3])
    prog.cz(input_qubit[0],input_qubit[3])
    prog.h(input_qubit[3])
    prog.x(input_qubit[3])
    prog.h(input_qubit[3])
    prog.cz(input_qubit[0],input_qubit[3])
    prog.h(input_qubit[3])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    prog.h(input_qubit[0])
    prog.cx(input_qubit[0],input_qubit[3])
    prog.x(input_qubit[3])
    prog.cx(input_qubit[0],input_qubit[3])
    # Oracle over the first n-1 qubits, targeting the last one.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    prog.h(input_qubit[0])
    prog.h(input_qubit[0])
    prog.cz(input_qubit[2],input_qubit[0])
    prog.h(input_qubit[0])
    prog.y(input_qubit[2])
    prog.cx(input_qubit[2],input_qubit[0])
    prog.cx(input_qubit[2],input_qubit[0])
    prog.h(input_qubit[0])
    prog.x(input_qubit[2])
    prog.cz(input_qubit[2],input_qubit[0])
    prog.h(input_qubit[0])
    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # f(rep) = (a . rep) XOR b -- a Bernstein-Vazirani-style oracle function.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Requires stored IBMQ credentials; performs network I/O to pick the
    # least busy operational real device with at least 2 qubits.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mock backend only to report circuit statistics.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_QC2348.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| true | true |
f724e8a3de033be4e0d6edde1760bbeabfca72f8 | 1,538 | py | Python | clients/python-legacy/generated/test/test_queue_item_impl.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | clients/python-legacy/generated/test/test_queue_item_impl.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | clients/python-legacy/generated/test/test_queue_item_impl.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.queue_item_impl import QueueItemImpl # noqa: E501
from openapi_client.rest import ApiException
class TestQueueItemImpl(unittest.TestCase):
    """Unit test stubs for the generated QueueItemImpl model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a QueueItemImpl instance.

        With include_optional=False only required parameters are passed;
        with True both required and optional parameters are included.
        """
        if not include_optional:
            return QueueItemImpl(
            )
        return QueueItemImpl(
            _class = '',
            expected_build_number = 56,
            id = '',
            pipeline = '',
            queued_time = 56
        )

    def testQueueItemImpl(self):
        """Instantiate the model with and without optional fields."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 26.982456 | 85 | 0.641743 |
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.queue_item_impl import QueueItemImpl
from openapi_client.rest import ApiException
class TestQueueItemImpl(unittest.TestCase):
    """Unit test stubs for the generated QueueItemImpl model.

    Fixed here: the ``if`` keyword had been stripped from
    ``if include_optional :`` (leaving a bare expression-colon line),
    which made this class a SyntaxError.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a QueueItemImpl; optional fields included when requested."""
        if include_optional :
            return QueueItemImpl(
                _class = '',
                expected_build_number = 56,
                id = '',
                pipeline = '',
                queued_time = 56
            )
        else :
            return QueueItemImpl(
            )

    def testQueueItemImpl(self):
        """Instantiate the model with and without optional fields."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| true | true |
f724e8d11edae12200491270eda330db309592bd | 927 | py | Python | test/test_create_records.py | Borye/vika.py | 7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f | [
"MIT"
] | 39 | 2020-10-27T13:17:37.000Z | 2022-03-17T11:04:39.000Z | test/test_create_records.py | Borye/vika.py | 7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f | [
"MIT"
] | 9 | 2020-10-27T14:44:48.000Z | 2022-01-19T04:46:58.000Z | test/test_create_records.py | Borye/vika.py | 7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f | [
"MIT"
] | 8 | 2020-10-27T15:12:34.000Z | 2022-01-19T14:23:15.000Z | import unittest
import time
from vika import Vika
from . import TEST_TABLE, TEST_API_BASE, TEST_API_TOKEN
class TestCreateRecords(unittest.TestCase):
    """Integration tests for creating records through the vika API.

    Fixed here: tearDown previously read ``self.created_records``
    unconditionally; if the test failed before that attribute was assigned,
    tearDown raised AttributeError and masked the original failure.
    """

    def setUp(self):
        vika = Vika(TEST_API_TOKEN)
        vika.set_api_base(TEST_API_BASE)
        self.dst = vika.datasheet(TEST_TABLE)
        # Track created records so tearDown can always clean up safely.
        self.created_records = []

    def test_record_create(self):
        time.sleep(1)  # crude pacing between API calls
        record = self.dst.records.create({
            "title": "高等数学"
        })
        time.sleep(1)
        self.assertIsNotNone(record._id)
        records = self.dst.records.bulk_create([
            {
                "title": "离散数学"
            },
            {
                "title": "线性代数"
            }
        ])
        self.created_records = records + [record]
        for rec in records:
            self.assertIsNotNone(rec._id)

    def tearDown(self):
        # Only delete when something was actually created; avoids an
        # AttributeError/empty delete call masking the real test failure.
        if self.created_records:
            self.dst.delete_records(self.created_records)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 24.394737 | 55 | 0.577131 | import unittest
import time
from vika import Vika
from . import TEST_TABLE, TEST_API_BASE, TEST_API_TOKEN
class TestCreateRecords(unittest.TestCase):
    """Integration tests for creating records through the vika API."""

    def setUp(self):
        # Connects to the live API; TEST_* constants come from the package.
        vika = Vika(TEST_API_TOKEN)
        vika.set_api_base(TEST_API_BASE)
        self.dst = vika.datasheet(TEST_TABLE)

    def test_record_create(self):
        """Create one record and a bulk pair; all must receive ids."""
        time.sleep(1)  # crude pacing between API calls
        record = self.dst.records.create({
            "title": "高等数学"
        })
        time.sleep(1)
        self.assertIsNotNone(record._id)
        records = self.dst.records.bulk_create([
            {
                "title": "离散数学"
            },
            {
                "title": "线性代数"
            }
        ])
        self.created_records = records + [record]
        for rec in records:
            self.assertIsNotNone(rec._id)

    def tearDown(self):
        # NOTE(review): assumes test_record_create ran far enough to set
        # created_records; an early failure would raise AttributeError here.
        self.dst.delete_records(self.created_records)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| true | true |
f724e8e9d1a8e16562e5a832d20c371877bd9ce1 | 17,324 | py | Python | openmaptiles/mbtile_tools.py | smellman/openmaptiles-tools | c310d1a57d60477c0452575c5b1983bce3fffac2 | [
"MIT"
] | 3 | 2021-02-02T10:16:43.000Z | 2021-06-14T20:00:06.000Z | openmaptiles/mbtile_tools.py | smellman/openmaptiles-tools | c310d1a57d60477c0452575c5b1983bce3fffac2 | [
"MIT"
] | 1 | 2021-02-23T17:02:14.000Z | 2021-02-23T17:02:14.000Z | openmaptiles/mbtile_tools.py | isabella232/openmaptiles-tools | 84e76e7dd5e7118de8dd11f1945607de04d3ea0e | [
"MIT"
] | 1 | 2020-08-13T09:01:10.000Z | 2020-08-13T09:01:10.000Z | import json
import os
import sqlite3
from datetime import datetime
from pathlib import Path
import asyncpg
from tabulate import tabulate
from typing import Dict
from openmaptiles.pgutils import get_postgis_version, get_vector_layers
from openmaptiles.sqlite_utils import query
from openmaptiles.sqltomvt import MvtGenerator
from openmaptiles.tileset import Tileset
from openmaptiles.utils import print_err, Bbox, print_tile, shorten_str
class KeyFinder:
    """Search mbtiles for frequently used duplicate tiles.

    Finds tile_ids referenced at least min_dup_count times in the ``map``
    table, optionally restricted to one zoom, optionally reporting blob size
    and example z/x/y coordinates, and writes the ids to stdout or a file.

    Fixed here: with both show_size and zoom set, ``WHERE zoom_level=?`` was
    appended after the dups/images join, where no zoom_level column exists
    (sqlite: "no such column"); the filter now lives inside the sub-query
    on the ``map`` table, which is also the semantically correct place.
    """

    def __init__(self,
                 mbtiles,
                 show_size=None,
                 show_examples=None,
                 outfile: str = None,
                 zoom=None,
                 min_dup_count=None,
                 verbose=False) -> None:
        self.mbtiles = mbtiles
        if min_dup_count is not None:
            min_dup_count = int(min_dup_count)
            if min_dup_count < 2:
                raise ValueError('min_dup_count must be an integer ≥ 2')
            self.min_dup_count = min_dup_count
        else:
            # High zooms have vastly more tiles, so require more duplicates.
            self.min_dup_count = 50 if zoom and zoom > 12 else 20
        self.use_stdout = outfile == '-'
        self.zoom = zoom
        self.verbose = verbose
        if outfile:
            self.outfile = True if self.use_stdout else Path(outfile)
        else:
            self.outfile = None
        # Unless given explicitly, size/example reporting follows verbosity.
        self.show_size = self.verbose if show_size is None else show_size
        self.show_examples = self.verbose if show_examples is None else show_examples

    def run(self):
        """Return the list of duplicate tile_ids (and optionally report them)."""
        if self.outfile and not self.use_stdout:
            with self.outfile.open("w"):
                pass  # create or truncate file, but don't write anything to it yet
        with sqlite3.connect(self.mbtiles) as conn:
            results = []
            if self.show_size:
                # Zoom must be filtered inside the sub-query on `map`; the
                # outer dups/images join has no zoom_level column.
                sql = "SELECT cnt, dups.tile_id, LENGTH(tile_data) FROM (" \
                      " SELECT tile_id, COUNT(*) AS cnt FROM map "
                sql_opts = []
                if self.zoom:
                    sql += " WHERE zoom_level=?"
                    sql_opts.append(self.zoom)
                sql += " GROUP BY tile_id HAVING cnt >= ?" \
                       ") dups JOIN images ON images.tile_id = dups.tile_id"
                sql_opts.append(self.min_dup_count)
            else:
                sql_opts = []
                sql = "SELECT COUNT(*) cnt, tile_id FROM map"
                if self.zoom:
                    sql += " WHERE zoom_level=?"
                    sql_opts.append(self.zoom)
                sql += " GROUP BY tile_id HAVING cnt >= ?"
                sql_opts.append(self.min_dup_count)
            for vals in query(conn, sql, sql_opts):
                results.append(vals)
            # Most-duplicated tiles first.
            results.sort(reverse=True)
            size = None
            examples = None
            for vals in results:
                if len(vals) == 3:
                    count, tile_id, size = vals
                else:
                    count, tile_id = vals
                if self.show_examples:
                    example_sql = "SELECT zoom_level, tile_column, tile_row FROM map " \
                                  "WHERE tile_id = ? LIMIT 5"
                    examples = [f'{z}/{x}/{y}' for z, x, y in
                                query(conn, example_sql, [tile_id])]
                if self.verbose:
                    res = f"{tile_id} x {count:,}"
                    if self.show_size:
                        res += f', {size:,} bytes'
                    if self.show_examples:
                        res += ', examples: ' + ', '.join(examples)
                    print_err(res)
            # Keep only the tile_ids for the caller / output.
            results = [v[1] for v in results]
            if self.use_stdout:
                for v in results:
                    print(v)
            elif self.outfile:
                with self.outfile.open("a") as f:
                    f.writelines([str(v) + '\n' for v in results])
            return results
class Imputer:
    """Fill in a new zoom level of an mbtiles file from the zoom above it.

    Tiles whose parent at zoom-1 matches one of the given "empty" keys are
    inserted directly into the ``map`` table; tiles whose parent is not a
    known key are written out as z/x/y coordinates still to be generated.
    """

    def __init__(self, mbtiles, keys, zoom, outfile: str = None,
                 verbose=False) -> None:
        self.mbtiles = mbtiles
        # Per-key counter of how many child tiles were imputed from that key.
        self.keys = {k: 0 for k in keys}
        self.zoom = zoom
        self.use_stdout = outfile == '-'
        # Stats go to stderr unless everything already streams to stdout.
        self.verbose = verbose or not self.use_stdout
        if outfile:
            self.outfile = True if self.use_stdout else Path(outfile)
        else:
            self.outfile = None

    def run(self):
        """Impute child tiles for self.zoom and report / write the rest."""
        with sqlite3.connect(self.mbtiles) as conn:
            # With no outfile there is nowhere to list missing tiles, so
            # restrict the scan to the known keys only.
            limit_to_keys = not self.outfile
            if self.outfile and not self.use_stdout:
                with self.outfile.open("w"):
                    pass  # create or truncate file, but don't write anything to it yet
            keyed_tiles = 0
            nokey_tiles = 0
            cursor = conn.cursor()
            key_stats = self.keys
            for with_key, without_key in self.tile_batches(conn, limit_to_keys):
                without_key.sort()
                if with_key:
                    with_key.sort()
                    for val in with_key:
                        # val = (zoom, x, y, tile_id); count per tile_id.
                        key_stats[val[3]] += 1
                    cursor.executemany(
                        'INSERT OR IGNORE INTO map'
                        '(zoom_level, tile_column, tile_row, tile_id)'
                        ' VALUES(?,?,?,?)',
                        with_key)
                    keyed_tiles += cursor.rowcount
                    conn.commit()
                if without_key:
                    if self.use_stdout:
                        for v in without_key:
                            print(v, end='')
                    else:
                        with self.outfile.open("a") as f:
                            f.writelines(without_key)
                    nokey_tiles += len(without_key)
            if self.verbose:
                for k, c in key_stats.items():
                    print_err(f"{k} - added {c:,}")
                print_err(f'Total imputed tiles: {keyed_tiles:,}')
                if nokey_tiles:
                    print_err(f'Total tiles need to be generated: {nokey_tiles:,}')

    def tile_batches(self, conn: sqlite3.Connection, limit_to_keys=False):
        """Generate batches of tiles to be processed for the new zoom,
        based on the previous zoom level. Each yield contains two batches:
        one with "empty" tiles (those that match known keys),
        and another with non-empty tiles (only if limit_to_keys is False).
        The first batch can be inserted into mbtiles db as is.
        The second batch will be used as a list of tiles to be generated.
        """
        batch_size = 1000000
        zoom = self.zoom
        search_zoom = zoom - 1
        sql = f"SELECT tile_column, tile_row, tile_id FROM map WHERE zoom_level=?"
        sql_args = [search_zoom]
        if limit_to_keys:
            sql += f" and tile_id IN ({','.join(('?' * len(self.keys)))})"
            sql_args += self.keys
        with_key = []
        without_key = []
        max_y = 2 ** search_zoom - 1
        for x, y, key in query(conn, sql, sql_args):
            if limit_to_keys or key in self.keys:
                # Each parent tile expands to its four children at `zoom`.
                with_key.append((zoom, x * 2, y * 2, key))
                with_key.append((zoom, x * 2 + 1, y * 2, key))
                with_key.append((zoom, x * 2, y * 2 + 1, key))
                with_key.append((zoom, x * 2 + 1, y * 2 + 1, key))
            else:
                # mbtiles uses inverted Y (starts at the bottom)
                ry = max_y - y
                without_key.append(f"{zoom}/{x * 2}/{ry * 2}\n")
                without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2}\n")
                without_key.append(f"{zoom}/{x * 2}/{ry * 2 + 1}\n")
                without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2 + 1}\n")
            if len(with_key) > batch_size or len(without_key) > batch_size:
                yield with_key, without_key
                with_key = []
                without_key = []
        if with_key or without_key:
            yield with_key, without_key
class Metadata:
    """Inspect, validate, generate and copy the `metadata` table of an
    mbtiles file (per the MBTiles 1.3 spec)."""

    def __init__(self, mbtiles: str, show_json: bool = False,
                 show_ranges: bool = False) -> None:
        self.mbtiles = mbtiles          # path to the mbtiles (sqlite) file
        self.show_json = show_json      # dump raw JSON values when printing
        self.show_ranges = show_ranges  # also print per-zoom tile ranges

    def print_all(self, file: str = None):
        """Print every metadata entry of `file` (defaults to self.mbtiles),
        formatted/validated, optionally followed by per-zoom tile ranges."""
        file = file or self.mbtiles
        data = self._get_metadata(file)
        if data:
            width = max(len(v) for v in data.keys())
            # Sort alphabetically, but force the bulky 'json' entry last.
            for name, value in sorted(data.items(),
                                      key=lambda v: v[0] if v[0] != 'json' else 'zz'):
                print(f"{name:{width}} {self.validate(name, value)[0]}")
        else:
            print(f"There are no values present in {file} metadata table")
        if self.show_ranges:
            with sqlite3.connect(file) as conn:
                sql = """\
SELECT zoom_level, COUNT(*) AS count,
MIN(tile_column) AS min_column, MAX(tile_column) AS max_column,
MIN(tile_row) AS min_row, MAX(tile_row) AS max_row
FROM map
GROUP BY zoom_level
"""
                res = []
                for z, cnt, min_x, max_x, min_y, max_y in sorted(query(conn, sql, [])):
                    res.append({
                        "Zoom": z,
                        "Tile count": f"{cnt:,}",
                        "Found tile ranges": f"{min_x},{min_y} x {max_x},{max_y}",
                    })
                print("\n" + tabulate(res, headers="keys"))

    def get_value(self, name):
        """Print a single metadata value; exits the process when missing."""
        with sqlite3.connect(self.mbtiles) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT value FROM metadata WHERE name=?", [name])
            row = cursor.fetchone()
            if row is None:
                print_err(f"Metadata field '{name}' is not found")
                exit(1)  # NOTE(review): builtin exit(); sys.exit(1) would be safer
            print(row[0])

    def set_value(self, name, value):
        """Insert/replace one metadata entry, or delete it when value is None.

        Raises ValueError if the value fails validation.
        """
        if value is not None:
            _, is_valid = self.validate(name, value)
            if not is_valid:
                raise ValueError(f"Invalid {name}={value}")
        with sqlite3.connect(self.mbtiles) as conn:
            cursor = conn.cursor()
            if value is None:
                cursor.execute("DELETE FROM metadata WHERE name=?;", [name])
            else:
                cursor.execute(
                    "INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
                    [name, value])

    async def generate(self, tileset, reset, auto_minmax,
                       pghost, pgport, dbname, user, password):
        """Build metadata from a tileset definition plus the vector layers
        discovered in a live PostgreSQL/PostGIS database, then store it."""
        ts = Tileset.parse(tileset)
        print(
            f'Connecting to PostgreSQL at {pghost}:{pgport}, db={dbname}, user={user}...')
        try:
            async with asyncpg.create_pool(
                database=dbname, host=pghost, port=pgport, user=user,
                password=password, min_size=1, max_size=1,
            ) as pool:
                async with pool.acquire() as conn:
                    mvt = MvtGenerator(
                        ts,
                        postgis_ver=await get_postgis_version(conn),
                        zoom='$1', x='$2', y='$3',
                    )
                    json_data = dict(vector_layers=await get_vector_layers(conn, mvt))
        except ConnectionError as err:
            print(f"Unable to connect to Postgres database: {err}")
            raise err
        # Convert tileset to the metadata object according to mbtiles 1.3 spec
        # https://github.com/mapbox/mbtiles-spec/blob/master/1.3/spec.md#content
        metadata = dict(
            # MUST
            name=ts.name,
            format="pbf",
            json=json.dumps(json_data, ensure_ascii=False, separators=(',', ':')),
            # SHOULD
            bounds=",".join(str(v) for v in ts.bounds),
            center=",".join(str(v) for v in ts.center),
            minzoom=str(ts.minzoom),
            maxzoom=str(ts.maxzoom),
            # MAY
            attribution=ts.attribution,
            description=ts.description,
            version=ts.version,
            # EXTRAS
            id=ts.id,
        )
        self._update_metadata(metadata, auto_minmax, reset, self.mbtiles,
                              ts.center[2])

    def copy(self, target_mbtiles, reset, auto_minmax):
        """Copy all metadata entries from self.mbtiles into target_mbtiles."""
        metadata = self._get_metadata(self.mbtiles)
        self._update_metadata(metadata, auto_minmax, reset, target_mbtiles)

    def show_tile(self, zoom, x, y, show_names, summary):
        """Decode and print a single tile, or report that it does not exist."""
        with sqlite3.connect(self.mbtiles) as conn:
            sql = "SELECT tile_data FROM tiles " \
                  "WHERE zoom_level=? AND tile_column=? AND tile_row=?"
            for row in query(conn, sql, [zoom, x, y]):
                print_tile(row[0], show_names, summary, f"{zoom}/{x}/{y}")
                break
            else:
                # for/else: the loop yielded no row => tile not found
                print(f"Tile {zoom}/{x}/{y} not found")

    def _update_metadata(self, metadata, auto_minmax, reset, file, center_zoom=None):
        """Apply env-var overrides, computed fields, and optional min/max zoom
        auto-detection, then write `metadata` into `file` and print it."""
        def update_from_env(param, env_var):
            # Environment variables take precedence over computed values.
            val = os.environ.get(env_var)
            if val is not None:
                metadata[param] = val

        update_from_env('name', 'METADATA_NAME')
        update_from_env('minzoom', 'MIN_ZOOM')
        update_from_env('maxzoom', 'MAX_ZOOM')
        update_from_env('attribution', 'METADATA_ATTRIBUTION')
        update_from_env('description', 'METADATA_DESCRIPTION')
        update_from_env('version', 'METADATA_VERSION')
        metadata['filesize'] = os.path.getsize(file)
        bbox_str = os.environ.get('BBOX')
        if bbox_str:
            bbox = Bbox(bbox=bbox_str,
                        center_zoom=os.environ.get('CENTER_ZOOM', center_zoom))
            metadata["bounds"] = bbox.bounds_str()
            metadata["center"] = bbox.center_str()
        with sqlite3.connect(file) as conn:
            cursor = conn.cursor()
            if auto_minmax:
                cursor.execute("SELECT MIN(zoom_level), MAX(zoom_level) FROM map")
                min_z, max_z = cursor.fetchone()
                if min_z is None:
                    raise ValueError("Unable to get min/max zoom - tile data is empty")
                metadata["minzoom"] = min_z
                metadata["maxzoom"] = max_z
            self._update_metadata_db(cursor, metadata, reset)
            conn.commit()
        print(f"New metadata values in {file}")
        self.print_all(file=file)

    @staticmethod
    def _get_metadata(file) -> Dict[str, str]:
        """Return the whole metadata table as a name->value dict."""
        with sqlite3.connect(file) as conn:
            return {k: v for k, v in
                    query(conn, "SELECT name, value FROM metadata", [])}

    def _update_metadata_db(self, cursor, metadata, reset):
        """Validate and upsert every entry; optionally wipe the table first."""
        if reset:
            # noinspection SqlWithoutWhere
            cursor.execute("DELETE FROM metadata;")
        for name, value in metadata.items():
            _, is_valid = self.validate(name, value)
            if not is_valid:
                raise ValueError(f"Invalid {name}={value}")
            cursor.execute(
                "INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
                [name, value])

    def validate(self, name, value):
        """Return (printable_value, is_valid) for one metadata entry.

        Known names get format checks ('mtime' as epoch-millis, numeric fields
        as ints, 'json' as a parseable JSON document); anything else is passed
        through unchanged and considered valid.
        """
        is_valid = True
        if name == 'mtime':
            try:
                # mtime is stored as milliseconds since the epoch.
                val = datetime.fromtimestamp(int(value) / 1000.0)
                value = f'{value} ({val.isoformat()})'
            except ValueError:
                is_valid = False
                value = f'{value} (invalid)'
        elif name in ('filesize', 'maskLevel', 'minzoom', 'maxzoom'):
            try:
                value = f'{int(value):,}'
            except ValueError:
                is_valid = False
                value = f'{value} (invalid)'
        elif name == 'json':
            try:
                val = json.loads(value)
                if self.show_json:
                    value = '(valid JSON value)'
                else:
                    value = '(The value is a valid JSON, use --show-json for raw dump)'
                # Summarize each vector layer: id, zoom range, and fields,
                # collapsing the many "name:<lang>" fields into one entry.
                res = []
                for v in val["vector_layers"]:
                    desc = ""
                    if "description" in v:
                        desc = shorten_str(v["description"], 40)
                    fields = []
                    names = []
                    for fld in v["fields"].keys():
                        if fld.startswith("name:"):
                            names.append(fld[5:])
                        else:
                            fields.append(fld)
                    fields_str = ", ".join(fields)
                    if names:
                        fields_str += f", name:* ({shorten_str(','.join(names), 20)})"
                    res.append({
                        "layer": v["id"],
                        "minZ": v["minzoom"],
                        "maxZ": v["maxzoom"],
                        "fields": fields_str,
                        "description": desc
                    })
                value += "\n\n" + tabulate(res, headers="keys")
                if self.show_json:
                    value += "\n\n"
                    value += json.dumps(val, ensure_ascii=False, indent=2)
            except ValueError:
                is_valid = False
                if self.show_json:
                    value = f'(invalid JSON value)\n{value}'
                else:
                    value = '(invalid JSON value, use --show-json to see it)'
        return value, is_valid
| 40.571429 | 90 | 0.510102 | import json
import os
import sqlite3
from datetime import datetime
from pathlib import Path
import asyncpg
from tabulate import tabulate
from typing import Dict
from openmaptiles.pgutils import get_postgis_version, get_vector_layers
from openmaptiles.sqlite_utils import query
from openmaptiles.sqltomvt import MvtGenerator
from openmaptiles.tileset import Tileset
from openmaptiles.utils import print_err, Bbox, print_tile, shorten_str
class KeyFinder:
    """Find tile_ids that are duplicated many times in an mbtiles `map` table
    (candidate "empty"/shared tiles), optionally printing sizes and example
    coordinates, and optionally writing the ids to a file or stdout."""

    def __init__(self,
                 mbtiles,
                 show_size=None,
                 show_examples=None,
                 outfile: str = None,
                 zoom=None,
                 min_dup_count=None,
                 verbose=False) -> None:
        self.mbtiles = mbtiles
        if min_dup_count is not None:
            min_dup_count = int(min_dup_count)
            if min_dup_count < 2:
                raise ValueError("min_dup_count must be an integer ≥ 2")
            self.min_dup_count = min_dup_count
        else:
            # Higher zooms have far more tiles, so demand more duplicates.
            self.min_dup_count = 50 if zoom and zoom > 12 else 20
        self.use_stdout = outfile == '-'
        self.zoom = zoom
        self.verbose = verbose
        if outfile:
            # True is a marker for "write to stdout"; otherwise a Path.
            self.outfile = True if self.use_stdout else Path(outfile)
        else:
            self.outfile = None
        # Size/example output default to the verbosity setting.
        self.show_size = self.verbose if show_size is None else show_size
        self.show_examples = self.verbose if show_examples is None else show_examples

    def run(self):
        """Return the list of duplicated tile_ids (most duplicated first),
        emitting them to the configured output and printing details when
        verbose."""
        if self.outfile and not self.use_stdout:
            with self.outfile.open("w"):
                pass  # create or truncate the output file
        with sqlite3.connect(self.mbtiles) as conn:
            results = []
            if self.show_size:
                # Join with `images` to also fetch each duplicate's byte size.
                sql = ("SELECT cnt, dups.tile_id, LENGTH(tile_data) FROM ("
                       " SELECT tile_id, COUNT(*) AS cnt FROM map")
                sql_opts = []
                if self.zoom:
                    # BUG FIX: the zoom filter must apply to `map` inside the
                    # subquery -- the outer join (dups JOIN images) has no
                    # zoom_level column, so appending WHERE there fails.
                    sql += " WHERE zoom_level=?"
                    sql_opts.append(self.zoom)
                sql += (" GROUP BY tile_id HAVING cnt >= ?"
                        ") dups JOIN images ON images.tile_id = dups.tile_id")
                sql_opts.append(self.min_dup_count)
            else:
                sql_opts = []
                sql = "SELECT COUNT(*) cnt, tile_id FROM map"
                if self.zoom:
                    sql += " WHERE zoom_level=?"
                    sql_opts.append(self.zoom)
                sql += " GROUP BY tile_id HAVING cnt >= ?"
                sql_opts.append(self.min_dup_count)
            for vals in query(conn, sql, sql_opts):
                results.append(vals)
            # Tuples start with the count, so this sorts most-duplicated first.
            results.sort(reverse=True)
            size = None
            examples = None
            for vals in results:
                if len(vals) == 3:
                    count, tile_id, size = vals
                else:
                    count, tile_id = vals
                if self.show_examples:
                    example_sql = ("SELECT zoom_level, tile_column, tile_row FROM map "
                                   "WHERE tile_id = ? LIMIT 5")
                    examples = [f'{z}/{x}/{y}' for z, x, y in
                                query(conn, example_sql, [tile_id])]
                if self.verbose:
                    res = f"{tile_id} x {count:,}"
                    if self.show_size:
                        res += f', {size:,} bytes'
                    if self.show_examples:
                        res += ', examples: ' + ', '.join(examples)
                    print_err(res)
        # Keep only the tile_id column for the returned/emitted result.
        results = [v[1] for v in results]
        if self.use_stdout:
            for v in results:
                print(v)
        elif self.outfile:
            with self.outfile.open("a") as f:
                f.writelines([str(v) + '\n' for v in results])
        return results
class Imputer:
    """Impute child tiles at `zoom` from parent tiles at `zoom - 1`: children
    of parents whose tile_id is in `keys` are inserted into the `map` table
    directly; the rest are written out as a list of tiles to generate."""

    def __init__(self, mbtiles, keys, zoom, outfile: str = None,
                 verbose=False) -> None:
        self.mbtiles = mbtiles
        # Per-key counters of how many child tiles were imputed for each key.
        self.keys = {k: 0 for k in keys}
        self.zoom = zoom
        self.use_stdout = outfile == '-'
        # When not streaming to stdout, always report progress.
        self.verbose = verbose or not self.use_stdout
        if outfile:
            # True is a marker for "write to stdout"; otherwise a Path.
            self.outfile = True if self.use_stdout else Path(outfile)
        else:
            self.outfile = None

    def run(self):
        """Process all parent tiles in batches, inserting keyed children into
        the db and emitting the unkeyed ones to the configured output."""
        with sqlite3.connect(self.mbtiles) as conn:
            # With no output file configured there is nowhere to list the
            # non-empty tiles, so restrict the scan to known keys only.
            limit_to_keys = not self.outfile
            if self.outfile and not self.use_stdout:
                with self.outfile.open("w"):
                    pass  # create or truncate file, but don't write anything to it yet
            keyed_tiles = 0
            nokey_tiles = 0
            cursor = conn.cursor()
            key_stats = self.keys
            for with_key, without_key in self.tile_batches(conn, limit_to_keys):
                without_key.sort()
                if with_key:
                    with_key.sort()
                    for val in with_key:
                        key_stats[val[3]] += 1
                    cursor.executemany(
                        'INSERT OR IGNORE INTO map'
                        '(zoom_level, tile_column, tile_row, tile_id)'
                        ' VALUES(?,?,?,?)',
                        with_key)
                    # NOTE(review): rowcount after executemany is assumed to
                    # reflect rows actually inserted (OR IGNORE skips dupes).
                    keyed_tiles += cursor.rowcount
                    conn.commit()
                if without_key:
                    if self.use_stdout:
                        for v in without_key:
                            print(v, end='')
                    else:
                        with self.outfile.open("a") as f:
                            f.writelines(without_key)
                    nokey_tiles += len(without_key)
            if self.verbose:
                for k, c in key_stats.items():
                    print_err(f"{k} - added {c:,}")
                print_err(f'Total imputed tiles: {keyed_tiles:,}')
                if nokey_tiles:
                    print_err(f'Total tiles need to be generated: {nokey_tiles:,}')

    def tile_batches(self, conn: sqlite3.Connection, limit_to_keys=False):
        """Generate batches of tiles to be processed for the new zoom, based
        on the previous zoom level. Each yield contains two batches: one with
        "empty" tiles (those that match known keys), and another with
        non-empty tiles (only if limit_to_keys is False)."""
        batch_size = 1000000
        zoom = self.zoom
        search_zoom = zoom - 1
        sql = "SELECT tile_column, tile_row, tile_id FROM map WHERE zoom_level=?"
        sql_args = [search_zoom]
        if limit_to_keys:
            # One '?' placeholder per known key.
            sql += f" and tile_id IN ({','.join('?' * len(self.keys))})"
            sql_args += self.keys
        with_key = []
        without_key = []
        max_y = 2 ** search_zoom - 1
        for x, y, key in query(conn, sql, sql_args):
            if limit_to_keys or key in self.keys:
                # Keyed parent: all four children reuse the same tile_id.
                with_key.append((zoom, x * 2, y * 2, key))
                with_key.append((zoom, x * 2 + 1, y * 2, key))
                with_key.append((zoom, x * 2, y * 2 + 1, key))
                with_key.append((zoom, x * 2 + 1, y * 2 + 1, key))
            else:
                # mbtiles uses inverted Y (starts at the bottom)
                ry = max_y - y
                without_key.append(f"{zoom}/{x * 2}/{ry * 2}\n")
                without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2}\n")
                without_key.append(f"{zoom}/{x * 2}/{ry * 2 + 1}\n")
                without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2 + 1}\n")
            if len(with_key) > batch_size or len(without_key) > batch_size:
                yield with_key, without_key
                with_key = []
                without_key = []
        if with_key or without_key:
            yield with_key, without_key
class Metadata:
def __init__(self, mbtiles: str, show_json: bool = False,
show_ranges: bool = False) -> None:
self.mbtiles = mbtiles
self.show_json = show_json
self.show_ranges = show_ranges
def print_all(self, file: str = None):
file = file or self.mbtiles
data = self._get_metadata(file)
if data:
width = max((len(v) for v in data.keys()))
for name, value in sorted(data.items(),
key=lambda v: v[0] if v[0] != 'json' else 'zz'):
print(f"{name:{width}} {self.validate(name, value)[0]}")
else:
print(f"There are no values present in {file} metadata table")
if self.show_ranges:
with sqlite3.connect(file) as conn:
sql = """\
SELECT zoom_level, COUNT(*) AS count,
MIN(tile_column) AS min_column, MAX(tile_column) AS max_column,
MIN(tile_row) AS min_row, MAX(tile_row) AS max_row
FROM map
GROUP BY zoom_level
"""
res = []
for z, cnt, min_x, max_x, min_y, max_y in sorted(query(conn, sql, [])):
res.append({
"Zoom": z,
"Tile count": f"{cnt:,}",
"Found tile ranges": f"{min_x},{min_y} x {max_x},{max_y}",
})
print("\n" + tabulate(res, headers="keys"))
def get_value(self, name):
with sqlite3.connect(self.mbtiles) as conn:
cursor = conn.cursor()
cursor.execute("SELECT value FROM metadata WHERE name=?", [name])
row = cursor.fetchone()
if row is None:
print_err(f"Metadata field '{name}' is not found")
exit(1)
print(row[0])
def set_value(self, name, value):
if value is not None:
_, is_valid = self.validate(name, value)
if not is_valid:
raise ValueError(f"Invalid {name}={value}")
with sqlite3.connect(self.mbtiles) as conn:
cursor = conn.cursor()
if value is None:
cursor.execute("DELETE FROM metadata WHERE name=?;", [name])
else:
cursor.execute(
"INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
[name, value])
async def generate(self, tileset, reset, auto_minmax,
pghost, pgport, dbname, user, password):
ts = Tileset.parse(tileset)
print(
f'Connecting to PostgreSQL at {pghost}:{pgport}, db={dbname}, user={user}...')
try:
async with asyncpg.create_pool(
database=dbname, host=pghost, port=pgport, user=user,
password=password, min_size=1, max_size=1,
) as pool:
async with pool.acquire() as conn:
mvt = MvtGenerator(
ts,
postgis_ver=await get_postgis_version(conn),
zoom='$1', x='$2', y='$3',
)
json_data = dict(vector_layers=await get_vector_layers(conn, mvt))
except ConnectionError as err:
print(f"Unable to connect to Postgres database: {err}")
raise err
metadata = dict(
name=ts.name,
format="pbf",
json=json.dumps(json_data, ensure_ascii=False, separators=(',', ':')),
bounds=",".join((str(v) for v in ts.bounds)),
center=",".join((str(v) for v in ts.center)),
minzoom=str(ts.minzoom),
maxzoom=str(ts.maxzoom),
attribution=ts.attribution,
description=ts.description,
version=ts.version,
id=ts.id,
)
self._update_metadata(metadata, auto_minmax, reset, self.mbtiles,
ts.center[2])
def copy(self, target_mbtiles, reset, auto_minmax):
metadata = self._get_metadata(self.mbtiles)
self._update_metadata(metadata, auto_minmax, reset, target_mbtiles)
def show_tile(self, zoom, x, y, show_names, summary):
with sqlite3.connect(self.mbtiles) as conn:
sql = "SELECT tile_data FROM tiles " \
"WHERE zoom_level=? AND tile_column=? AND tile_row=?"
for row in query(conn, sql, [zoom, x, y]):
print_tile(row[0], show_names, summary, f"{zoom}/{x}/{y}")
break
else:
print(f"Tile {zoom}/{x}/{y} not found")
def _update_metadata(self, metadata, auto_minmax, reset, file, center_zoom=None):
def update_from_env(param, env_var):
val = os.environ.get(env_var)
if val is not None:
metadata[param] = val
update_from_env('name', 'METADATA_NAME')
update_from_env('minzoom', 'MIN_ZOOM')
update_from_env('maxzoom', 'MAX_ZOOM')
update_from_env('attribution', 'METADATA_ATTRIBUTION')
update_from_env('description', 'METADATA_DESCRIPTION')
update_from_env('version', 'METADATA_VERSION')
metadata['filesize'] = os.path.getsize(file)
bbox_str = os.environ.get('BBOX')
if bbox_str:
bbox = Bbox(bbox=bbox_str,
center_zoom=os.environ.get('CENTER_ZOOM', center_zoom))
metadata["bounds"] = bbox.bounds_str()
metadata["center"] = bbox.center_str()
with sqlite3.connect(file) as conn:
cursor = conn.cursor()
if auto_minmax:
cursor.execute("SELECT MIN(zoom_level), MAX(zoom_level) FROM map")
min_z, max_z = cursor.fetchone()
if min_z is None:
raise ValueError("Unable to get min/max zoom - tile data is empty")
metadata["minzoom"] = min_z
metadata["maxzoom"] = max_z
self._update_metadata_db(cursor, metadata, reset)
conn.commit()
print(f"New metadata values in {file}")
self.print_all(file=file)
@staticmethod
def _get_metadata(file) -> Dict[str, str]:
with sqlite3.connect(file) as conn:
return {k: v for k, v in
query(conn, "SELECT name, value FROM metadata", [])}
def _update_metadata_db(self, cursor, metadata, reset):
if reset:
cursor.execute("DELETE FROM metadata;")
for name, value in metadata.items():
_, is_valid = self.validate(name, value)
if not is_valid:
raise ValueError(f"Invalid {name}={value}")
cursor.execute(
"INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
[name, value])
def validate(self, name, value):
is_valid = True
if name == 'mtime':
try:
val = datetime.fromtimestamp(int(value) / 1000.0)
value = f'{value} ({val.isoformat()})'
except ValueError:
is_valid = False
value = f'{value} (invalid)'
elif name in ('filesize', 'maskLevel', 'minzoom', 'maxzoom'):
try:
value = f'{int(value):,}'
except ValueError:
is_valid = False
value = f'{value} (invalid)'
elif name == 'json':
try:
val = json.loads(value)
if self.show_json:
value = f'(valid JSON value)'
else:
value = '(The value is a valid JSON, use --show-json for raw dump)'
res = []
for v in val["vector_layers"]:
desc = ""
if "description" in v:
desc = shorten_str(v["description"], 40)
fields = []
names = []
for fld in v["fields"].keys():
if fld.startswith("name:"):
names.append(fld[5:])
else:
fields.append(fld)
fields_str = ", ".join(v for v in fields)
if names:
fields_str += f", name:* ({shorten_str(','.join(names), 20)})"
res.append({
"layer": v["id"],
"minZ": v["minzoom"],
"maxZ": v["maxzoom"],
"fields": fields_str,
"description": desc
})
value += "\n\n" + tabulate(res, headers="keys")
if self.show_json:
value += "\n\n"
value += json.dumps(val, ensure_ascii=False, indent=2)
except ValueError:
is_valid = False
if self.show_json:
value = f'(invalid JSON value)\n{value}'
else:
value = f'(invalid JSON value, use --show-json to see it)'
return value, is_valid
| true | true |
f724e92c199fe24cf4485298fdf880c51432d6c6 | 6,498 | py | Python | onnx_chainer/functions/__init__.py | blakexu/chainer | f3c2948af2796bb5096f628220fd7321120e1a75 | [
"MIT"
] | 1 | 2019-10-30T06:43:45.000Z | 2019-10-30T06:43:45.000Z | onnx_chainer/functions/__init__.py | blakexu/chainer | f3c2948af2796bb5096f628220fd7321120e1a75 | [
"MIT"
] | null | null | null | onnx_chainer/functions/__init__.py | blakexu/chainer | f3c2948af2796bb5096f628220fd7321120e1a75 | [
"MIT"
] | null | null | null | from onnx_chainer.functions.activation import convert_ClippedReLU # NOQA
from onnx_chainer.functions.activation import convert_ELU # NOQA
from onnx_chainer.functions.activation import convert_HardSigmoid # NOQA
from onnx_chainer.functions.activation import convert_LeakyReLU # NOQA
from onnx_chainer.functions.activation import convert_LogSoftmax # NOQA
from onnx_chainer.functions.activation import convert_PReLUFunction # NOQA
from onnx_chainer.functions.activation import convert_ReLU # NOQA
from onnx_chainer.functions.activation import convert_Selu # NOQA
from onnx_chainer.functions.activation import convert_Sigmoid # NOQA
from onnx_chainer.functions.activation import convert_Softmax # NOQA
from onnx_chainer.functions.activation import convert_Softplus # NOQA
from onnx_chainer.functions.activation import convert_Tanh # NOQA
from onnx_chainer.functions.array import convert_Cast # NOQA
from onnx_chainer.functions.array import convert_Concat # NOQA
from onnx_chainer.functions.array import convert_Copy # NOQA
from onnx_chainer.functions.array import convert_Depth2Space # NOQA
from onnx_chainer.functions.array import convert_Dstack # NOQA
from onnx_chainer.functions.array import convert_ExpandDims # NOQA
from onnx_chainer.functions.array import convert_GetItem # NOQA
from onnx_chainer.functions.array import convert_Hstack # NOQA
from onnx_chainer.functions.array import convert_Moveaxis # NOQA
from onnx_chainer.functions.array import convert_Pad # NOQA
from onnx_chainer.functions.array import convert_Repeat # NOQA
from onnx_chainer.functions.array import convert_Reshape # NOQA
from onnx_chainer.functions.array import convert_ResizeImages # NOQA
from onnx_chainer.functions.array import convert_Separate # NOQA
from onnx_chainer.functions.array import convert_Shape # NOQA
from onnx_chainer.functions.array import convert_Space2Depth # NOQA
from onnx_chainer.functions.array import convert_SplitAxis # NOQA
from onnx_chainer.functions.array import convert_Squeeze # NOQA
from onnx_chainer.functions.array import convert_Stack # NOQA
from onnx_chainer.functions.array import convert_Swapaxes # NOQA
from onnx_chainer.functions.array import convert_Tile # NOQA
from onnx_chainer.functions.array import convert_Transpose # NOQA
from onnx_chainer.functions.array import convert_Vstack # NOQA
from onnx_chainer.functions.array import convert_Where # NOQA
from onnx_chainer.functions.connection import convert_Convolution2DFunction # NOQA
from onnx_chainer.functions.connection import convert_ConvolutionND # NOQA
from onnx_chainer.functions.connection import convert_Deconvolution2DFunction # NOQA
from onnx_chainer.functions.connection import convert_DeconvolutionND # NOQA
from onnx_chainer.functions.connection import convert_EmbedIDFunction # NOQA
from onnx_chainer.functions.connection import convert_LinearFunction # NOQA
from onnx_chainer.functions.loss import convert_SoftmaxCrossEntropy # NOQA
from onnx_chainer.functions.math import convert_Absolute # NOQA
from onnx_chainer.functions.math import convert_Add # NOQA
from onnx_chainer.functions.math import convert_AddConstant # NOQA
from onnx_chainer.functions.math import convert_Arccos # NOQA
from onnx_chainer.functions.math import convert_Arcsin # NOQA
from onnx_chainer.functions.math import convert_Arctan # NOQA
from onnx_chainer.functions.math import convert_ArgMax # NOQA
from onnx_chainer.functions.math import convert_ArgMin # NOQA
from onnx_chainer.functions.math import convert_BroadcastTo # NOQA
from onnx_chainer.functions.math import convert_Clip # NOQA
from onnx_chainer.functions.math import convert_Cos # NOQA
from onnx_chainer.functions.math import convert_Cosh # NOQA
from onnx_chainer.functions.math import convert_Div # NOQA
from onnx_chainer.functions.math import convert_DivFromConstant # NOQA
from onnx_chainer.functions.math import convert_Exp # NOQA
from onnx_chainer.functions.math import convert_Identity # NOQA
from onnx_chainer.functions.math import convert_LinearInterpolate # NOQA
from onnx_chainer.functions.math import convert_Log # NOQA
from onnx_chainer.functions.math import convert_LogSumExp # NOQA
from onnx_chainer.functions.math import convert_MatMul # NOQA
from onnx_chainer.functions.math import convert_Max # NOQA
from onnx_chainer.functions.math import convert_Maximum # NOQA
from onnx_chainer.functions.math import convert_Mean # NOQA
from onnx_chainer.functions.math import convert_Min # NOQA
from onnx_chainer.functions.math import convert_Minimum # NOQA
from onnx_chainer.functions.math import convert_Mul # NOQA
from onnx_chainer.functions.math import convert_MulConstant # NOQA
from onnx_chainer.functions.math import convert_Neg # NOQA
from onnx_chainer.functions.math import convert_PowConstVar # NOQA
from onnx_chainer.functions.math import convert_PowVarConst # NOQA
from onnx_chainer.functions.math import convert_PowVarVar # NOQA
from onnx_chainer.functions.math import convert_Prod # NOQA
from onnx_chainer.functions.math import convert_RsqrtGPU # NOQA
from onnx_chainer.functions.math import convert_Sin # NOQA
from onnx_chainer.functions.math import convert_Sinh # NOQA
from onnx_chainer.functions.math import convert_Sqrt # NOQA
from onnx_chainer.functions.math import convert_Square # NOQA
from onnx_chainer.functions.math import convert_Sub # NOQA
from onnx_chainer.functions.math import convert_SubFromConstant # NOQA
from onnx_chainer.functions.math import convert_Sum # NOQA
from onnx_chainer.functions.math import convert_Tan # NOQA
from onnx_chainer.functions.noise import convert_Dropout # NOQA
from onnx_chainer.functions.normalization import convert_BatchNormalization # NOQA
from onnx_chainer.functions.normalization import convert_FixedBatchNormalization # NOQA
from onnx_chainer.functions.normalization import convert_GroupNormalization # NOQA
from onnx_chainer.functions.normalization import convert_LocalResponseNormalization # NOQA
from onnx_chainer.functions.normalization import convert_NormalizeL2 # NOQA
from onnx_chainer.functions.pooling import convert_AveragePooling2D # NOQA
from onnx_chainer.functions.pooling import convert_AveragePoolingND # NOQA
from onnx_chainer.functions.pooling import convert_MaxPooling2D # NOQA
from onnx_chainer.functions.pooling import convert_MaxPoolingND # NOQA
from onnx_chainer.functions.pooling import convert_ROIPooling2D # NOQA
from onnx_chainer.functions.pooling import convert_Unpooling2D # NOQA
| 62.480769 | 91 | 0.851185 | from onnx_chainer.functions.activation import convert_ClippedReLU
from onnx_chainer.functions.activation import convert_ELU
from onnx_chainer.functions.activation import convert_HardSigmoid
from onnx_chainer.functions.activation import convert_LeakyReLU
from onnx_chainer.functions.activation import convert_LogSoftmax
from onnx_chainer.functions.activation import convert_PReLUFunction
from onnx_chainer.functions.activation import convert_ReLU
from onnx_chainer.functions.activation import convert_Selu
from onnx_chainer.functions.activation import convert_Sigmoid
from onnx_chainer.functions.activation import convert_Softmax
from onnx_chainer.functions.activation import convert_Softplus
from onnx_chainer.functions.activation import convert_Tanh
from onnx_chainer.functions.array import convert_Cast
from onnx_chainer.functions.array import convert_Concat
from onnx_chainer.functions.array import convert_Copy
from onnx_chainer.functions.array import convert_Depth2Space
from onnx_chainer.functions.array import convert_Dstack
from onnx_chainer.functions.array import convert_ExpandDims
from onnx_chainer.functions.array import convert_GetItem
from onnx_chainer.functions.array import convert_Hstack
from onnx_chainer.functions.array import convert_Moveaxis
from onnx_chainer.functions.array import convert_Pad
from onnx_chainer.functions.array import convert_Repeat
from onnx_chainer.functions.array import convert_Reshape
from onnx_chainer.functions.array import convert_ResizeImages
from onnx_chainer.functions.array import convert_Separate
from onnx_chainer.functions.array import convert_Shape
from onnx_chainer.functions.array import convert_Space2Depth
from onnx_chainer.functions.array import convert_SplitAxis
from onnx_chainer.functions.array import convert_Squeeze
from onnx_chainer.functions.array import convert_Stack
from onnx_chainer.functions.array import convert_Swapaxes
from onnx_chainer.functions.array import convert_Tile
from onnx_chainer.functions.array import convert_Transpose
from onnx_chainer.functions.array import convert_Vstack
from onnx_chainer.functions.array import convert_Where
from onnx_chainer.functions.connection import convert_Convolution2DFunction
from onnx_chainer.functions.connection import convert_ConvolutionND
from onnx_chainer.functions.connection import convert_Deconvolution2DFunction
from onnx_chainer.functions.connection import convert_DeconvolutionND
from onnx_chainer.functions.connection import convert_EmbedIDFunction
from onnx_chainer.functions.connection import convert_LinearFunction
from onnx_chainer.functions.loss import convert_SoftmaxCrossEntropy
from onnx_chainer.functions.math import convert_Absolute
from onnx_chainer.functions.math import convert_Add
from onnx_chainer.functions.math import convert_AddConstant
from onnx_chainer.functions.math import convert_Arccos
from onnx_chainer.functions.math import convert_Arcsin
from onnx_chainer.functions.math import convert_Arctan
from onnx_chainer.functions.math import convert_ArgMax
from onnx_chainer.functions.math import convert_ArgMin
from onnx_chainer.functions.math import convert_BroadcastTo
from onnx_chainer.functions.math import convert_Clip
from onnx_chainer.functions.math import convert_Cos
from onnx_chainer.functions.math import convert_Cosh
from onnx_chainer.functions.math import convert_Div
from onnx_chainer.functions.math import convert_DivFromConstant
from onnx_chainer.functions.math import convert_Exp
from onnx_chainer.functions.math import convert_Identity
from onnx_chainer.functions.math import convert_LinearInterpolate
from onnx_chainer.functions.math import convert_Log
from onnx_chainer.functions.math import convert_LogSumExp
from onnx_chainer.functions.math import convert_MatMul
from onnx_chainer.functions.math import convert_Max
from onnx_chainer.functions.math import convert_Maximum
from onnx_chainer.functions.math import convert_Mean
from onnx_chainer.functions.math import convert_Min
from onnx_chainer.functions.math import convert_Minimum
from onnx_chainer.functions.math import convert_Mul
from onnx_chainer.functions.math import convert_MulConstant
from onnx_chainer.functions.math import convert_Neg
from onnx_chainer.functions.math import convert_PowConstVar
from onnx_chainer.functions.math import convert_PowVarConst
from onnx_chainer.functions.math import convert_PowVarVar
from onnx_chainer.functions.math import convert_Prod
from onnx_chainer.functions.math import convert_RsqrtGPU
from onnx_chainer.functions.math import convert_Sin
from onnx_chainer.functions.math import convert_Sinh
from onnx_chainer.functions.math import convert_Sqrt
from onnx_chainer.functions.math import convert_Square
from onnx_chainer.functions.math import convert_Sub
from onnx_chainer.functions.math import convert_SubFromConstant
from onnx_chainer.functions.math import convert_Sum
from onnx_chainer.functions.math import convert_Tan
from onnx_chainer.functions.noise import convert_Dropout
from onnx_chainer.functions.normalization import convert_BatchNormalization
from onnx_chainer.functions.normalization import convert_FixedBatchNormalization
from onnx_chainer.functions.normalization import convert_GroupNormalization
from onnx_chainer.functions.normalization import convert_LocalResponseNormalization
from onnx_chainer.functions.normalization import convert_NormalizeL2
from onnx_chainer.functions.pooling import convert_AveragePooling2D
from onnx_chainer.functions.pooling import convert_AveragePoolingND
from onnx_chainer.functions.pooling import convert_MaxPooling2D
from onnx_chainer.functions.pooling import convert_MaxPoolingND
from onnx_chainer.functions.pooling import convert_ROIPooling2D
from onnx_chainer.functions.pooling import convert_Unpooling2D
| true | true |
f724eb2bf2b936eabc0cf6b12314246ce61bb4cc | 244 | py | Python | crypten/common/__init__.py | vreis/CrypTen-2 | 839a751277a901e4edd9166a720fb3a29deac641 | [
"MIT"
] | 2 | 2020-03-23T18:32:13.000Z | 2020-12-11T10:54:08.000Z | crypten/common/__init__.py | vreis/CrypTen-2 | 839a751277a901e4edd9166a720fb3a29deac641 | [
"MIT"
] | null | null | null | crypten/common/__init__.py | vreis/CrypTen-2 | 839a751277a901e4edd9166a720fb3a29deac641 | [
"MIT"
] | 2 | 2020-04-15T19:28:02.000Z | 2020-04-16T01:59:30.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Names exported by `from crypten.common import *` — presumably the package's
# submodules (rng, tensor_types, util); verify against the package layout.
__all__ = ["rng", "tensor_types", "util"]
| 27.111111 | 65 | 0.721311 |
__all__ = ["rng", "tensor_types", "util"]
| true | true |
f724ebf9502cb921a15388d5af77f9d5423ced5c | 104,651 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_firewall_policy6.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_firewall_policy6.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_firewall_policy6.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_policy6
short_description: Configure IPv6 policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and policy6 category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
firewall_policy6:
description:
- Configure IPv6 policies.
default: null
type: dict
suboptions:
action:
description:
- Policy action (allow/deny/ipsec).
type: str
choices:
- accept
- deny
- ipsec
anti_replay:
description:
- Enable/disable anti-replay check.
type: str
choices:
- enable
- disable
app_category:
description:
- Application category ID list.
type: list
suboptions:
id:
description:
- Category IDs.
required: true
type: int
app_group:
description:
- Application group names.
type: list
suboptions:
name:
description:
- Application group names. Source application.group.name.
required: true
type: str
application:
description:
- Application ID list.
type: list
suboptions:
id:
description:
- Application IDs.
required: true
type: int
application_list:
description:
- Name of an existing Application list. Source application.list.name.
type: str
auto_asic_offload:
description:
- Enable/disable policy traffic ASIC offloading.
type: str
choices:
- enable
- disable
av_profile:
description:
- Name of an existing Antivirus profile. Source antivirus.profile.name.
type: str
cifs_profile:
description:
- Name of an existing CIFS profile. Source cifs.profile.name.
type: str
comments:
description:
- Comment.
type: str
custom_log_fields:
description:
- Log field index numbers to append custom log fields to log messages for this policy.
type: list
suboptions:
field_id:
description:
- Custom log field. Source log.custom-field.id.
type: str
devices:
description:
- Names of devices or device groups that can be matched by the policy.
type: list
suboptions:
name:
description:
- Device or group name. Source user.device.alias user.device-group.name user.device-category.name.
required: true
type: str
diffserv_forward:
description:
- Enable to change packet"s DiffServ values to the specified diffservcode-forward value.
type: str
choices:
- enable
- disable
diffserv_reverse:
description:
- Enable to change packet"s reverse (reply) DiffServ values to the specified diffservcode-rev value.
type: str
choices:
- enable
- disable
diffservcode_forward:
description:
- Change packet"s DiffServ to this value.
type: str
diffservcode_rev:
description:
- Change packet"s reverse (reply) DiffServ to this value.
type: str
dlp_sensor:
description:
- Name of an existing DLP sensor. Source dlp.sensor.name.
type: str
dnsfilter_profile:
description:
- Name of an existing DNS filter profile. Source dnsfilter.profile.name.
type: str
dscp_match:
description:
- Enable DSCP check.
type: str
choices:
- enable
- disable
dscp_negate:
description:
- Enable negated DSCP match.
type: str
choices:
- enable
- disable
dscp_value:
description:
- DSCP value.
type: str
dsri:
description:
- Enable DSRI to ignore HTTP server responses.
type: str
choices:
- enable
- disable
dstaddr:
description:
- Destination address and address group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name.
required: true
type: str
dstaddr_negate:
description:
- When enabled dstaddr specifies what the destination address must NOT be.
type: str
choices:
- enable
- disable
dstintf:
description:
- Outgoing (egress) interface.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
type: str
emailfilter_profile:
description:
- Name of an existing email filter profile. Source emailfilter.profile.name.
type: str
firewall_session_dirty:
description:
- How to handle sessions if the configuration of this firewall policy changes.
type: str
choices:
- check-all
- check-new
fixedport:
description:
- Enable to prevent source NAT from changing a session"s source port.
type: str
choices:
- enable
- disable
fsso_groups:
description:
- Names of FSSO groups.
type: list
suboptions:
name:
description:
- Names of FSSO groups. Source user.adgrp.name.
required: true
type: str
global_label:
description:
- Label for the policy that appears when the GUI is in Global View mode.
type: str
groups:
description:
- Names of user groups that can authenticate with this policy.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
http_policy_redirect:
description:
- Redirect HTTP(S) traffic to matching transparent web proxy policy.
type: str
choices:
- enable
- disable
icap_profile:
description:
- Name of an existing ICAP profile. Source icap.profile.name.
type: str
inbound:
description:
- 'Policy-based IPsec VPN: only traffic from the remote network can initiate a VPN.'
type: str
choices:
- enable
- disable
inspection_mode:
description:
- Policy inspection mode (Flow/proxy). Default is Flow mode.
type: str
choices:
- proxy
- flow
ippool:
description:
- Enable to use IP Pools for source NAT.
type: str
choices:
- enable
- disable
ips_sensor:
description:
- Name of an existing IPS sensor. Source ips.sensor.name.
type: str
label:
description:
- Label for the policy that appears when the GUI is in Section View mode.
type: str
logtraffic:
description:
- Enable or disable logging. Log all sessions or security profile sessions.
type: str
choices:
- all
- utm
- disable
logtraffic_start:
description:
- Record logs when a session starts and ends.
type: str
choices:
- enable
- disable
mms_profile:
description:
- Name of an existing MMS profile. Source firewall.mms-profile.name.
type: str
name:
description:
- Policy name.
type: str
nat:
description:
- Enable/disable source NAT.
type: str
choices:
- enable
- disable
natinbound:
description:
- 'Policy-based IPsec VPN: apply destination NAT to inbound traffic.'
type: str
choices:
- enable
- disable
natoutbound:
description:
- 'Policy-based IPsec VPN: apply source NAT to outbound traffic.'
type: str
choices:
- enable
- disable
np_acceleration:
description:
- Enable/disable UTM Network Processor acceleration.
type: str
choices:
- enable
- disable
outbound:
description:
- 'Policy-based IPsec VPN: only traffic from the internal network can initiate a VPN.'
type: str
choices:
- enable
- disable
per_ip_shaper:
description:
- Per-IP traffic shaper. Source firewall.shaper.per-ip-shaper.name.
type: str
policyid:
description:
- Policy ID.
required: true
type: int
poolname:
description:
- IP Pool names.
type: list
suboptions:
name:
description:
- IP pool name. Source firewall.ippool6.name.
required: true
type: str
profile_group:
description:
- Name of profile group. Source firewall.profile-group.name.
type: str
profile_protocol_options:
description:
- Name of an existing Protocol options profile. Source firewall.profile-protocol-options.name.
type: str
profile_type:
description:
- Determine whether the firewall policy allows security profile groups or single profiles only.
type: str
choices:
- single
- group
replacemsg_override_group:
description:
- Override the default replacement message group for this policy. Source system.replacemsg-group.name.
type: str
rsso:
description:
- Enable/disable RADIUS single sign-on (RSSO).
type: str
choices:
- enable
- disable
schedule:
description:
- Schedule name. Source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name.
type: str
send_deny_packet:
description:
- Enable/disable return of deny-packet.
type: str
choices:
- enable
- disable
service:
description:
- Service and service group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
service_negate:
description:
- When enabled service specifies what the service must NOT be.
type: str
choices:
- enable
- disable
session_ttl:
description:
- Session TTL in seconds for sessions accepted by this policy. 0 means use the system default session TTL.
type: int
spamfilter_profile:
description:
- Name of an existing Spam filter profile. Source spamfilter.profile.name.
type: str
srcaddr:
description:
- Source address and address group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
srcaddr_negate:
description:
- When enabled srcaddr specifies what the source address must NOT be.
type: str
choices:
- enable
- disable
srcintf:
description:
- Incoming (ingress) interface.
type: list
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
type: str
ssh_filter_profile:
description:
- Name of an existing SSH filter profile. Source ssh-filter.profile.name.
type: str
ssh_policy_redirect:
description:
- Redirect SSH traffic to matching transparent proxy policy.
type: str
choices:
- enable
- disable
ssl_mirror:
description:
- Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).
type: str
choices:
- enable
- disable
ssl_mirror_intf:
description:
- SSL mirror interface name.
type: list
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
type: str
ssl_ssh_profile:
description:
- Name of an existing SSL SSH profile. Source firewall.ssl-ssh-profile.name.
type: str
status:
description:
- Enable or disable this policy.
type: str
choices:
- enable
- disable
tcp_mss_receiver:
description:
- Receiver TCP maximum segment size (MSS).
type: int
tcp_mss_sender:
description:
- Sender TCP maximum segment size (MSS).
type: int
tcp_session_without_syn:
description:
- Enable/disable creation of TCP session without SYN flag.
type: str
choices:
- all
- data-only
- disable
timeout_send_rst:
description:
- Enable/disable sending RST packets when TCP sessions expire.
type: str
choices:
- enable
- disable
tos:
description:
- ToS (Type of Service) value used for comparison.
type: str
tos_mask:
description:
- Non-zero bit positions are used for comparison while zero bit positions are ignored.
type: str
tos_negate:
description:
- Enable negated TOS match.
type: str
choices:
- enable
- disable
traffic_shaper:
description:
                - Traffic shaper. Source firewall.shaper.traffic-shaper.name.
type: str
traffic_shaper_reverse:
description:
- Reverse traffic shaper. Source firewall.shaper.traffic-shaper.name.
type: str
url_category:
description:
- URL category ID list.
type: list
suboptions:
id:
description:
- URL category ID.
required: true
type: int
users:
description:
- Names of individual users that can authenticate with this policy.
type: list
suboptions:
name:
description:
- Names of individual users that can authenticate with this policy. Source user.local.name.
required: true
type: str
utm_status:
description:
- Enable AV/web/ips protection profile.
type: str
choices:
- enable
- disable
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
vlan_cos_fwd:
description:
- 'VLAN forward direction user priority: 255 passthrough, 0 lowest, 7 highest'
type: int
vlan_cos_rev:
description:
- 'VLAN reverse direction user priority: 255 passthrough, 0 lowest, 7 highest'
type: int
vlan_filter:
description:
- Set VLAN filters.
type: str
voip_profile:
description:
- Name of an existing VoIP profile. Source voip.profile.name.
type: str
vpntunnel:
description:
- 'Policy-based IPsec VPN: name of the IPsec VPN Phase 1. Source vpn.ipsec.phase1.name vpn.ipsec.manualkey.name.'
type: str
waf_profile:
description:
- Name of an existing Web application firewall profile. Source waf.profile.name.
type: str
webcache:
description:
- Enable/disable web cache.
type: str
choices:
- enable
- disable
webcache_https:
description:
- Enable/disable web cache for HTTPS.
type: str
choices:
- disable
- enable
webfilter_profile:
description:
- Name of an existing Web filter profile. Source webfilter.profile.name.
type: str
webproxy_forward_server:
description:
- Web proxy forward server name. Source web-proxy.forward-server.name web-proxy.forward-server-group.name.
type: str
webproxy_profile:
description:
- Webproxy profile name. Source web-proxy.profile.name.
type: str
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_firewall_policy6
fortios_firewall_policy6:
vdom: root
state: present
firewall_policy6:
action: deny
anti_replay: enable
auto_asic_offload: enable
diffserv_forward: disable
diffserv_reverse: disable
diffservcode_forward: '000000'
diffservcode_rev: '000000'
dsri: disable
dstaddr:
- name: all
dstaddr_negate: disable
dstintf:
- name: port3
firewall_session_dirty: check-all
fixedport: disable
http_policy_redirect: disable
inbound: disable
inspection_mode: flow
ippool: disable
logtraffic: disable
logtraffic_start: disable
name: policy6p1
nat: disable
natinbound: disable
natoutbound: disable
outbound: disable
policyid: 1
profile_type: single
rsso: disable
schedule: always
send_deny_packet: disable
service:
- name: ALL
service_negate: disable
srcaddr:
- name: all
srcaddr_negate: disable
srcintf:
- name: port4
ssh_policy_redirect: disable
ssl_mirror: disable
status: enable
tcp_mss_receiver: 0
tcp_mss_sender: 0
tcp_session_without_syn: disable
timeout_send_rst: disable
tos: '0x00'
tos_mask: '0x00'
tos_negate: disable
utm_status: disable
vlan_cos_fwd: 0
vlan_cos_rev: 0
webcache: disable
webcache_https: disable
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_firewall_policy6_data(json):
    """Return a copy of *json* restricted to the known firewall policy6 options.

    Only keys listed in ``option_list`` are kept, and keys whose value is
    ``None`` (parameters the playbook did not set) are dropped so they are
    never sent to the FortiGate API.

    :param json: the raw ``firewall_policy6`` task parameters (a dict).
    :return: a new dict containing only the explicitly provided options.
    """
    option_list = ['action', 'anti_replay', 'app_category',
                   'app_group', 'application', 'application_list',
                   'auto_asic_offload', 'av_profile', 'cifs_profile',
                   'comments', 'custom_log_fields', 'devices',
                   'diffserv_forward', 'diffserv_reverse', 'diffservcode_forward',
                   'diffservcode_rev', 'dlp_sensor', 'dnsfilter_profile',
                   'dscp_match', 'dscp_negate', 'dscp_value',
                   'dsri', 'dstaddr', 'dstaddr_negate',
                   'dstintf', 'emailfilter_profile', 'firewall_session_dirty',
                   'fixedport', 'fsso_groups', 'global_label',
                   'groups', 'http_policy_redirect', 'icap_profile',
                   'inbound', 'inspection_mode', 'ippool',
                   'ips_sensor', 'label', 'logtraffic',
                   'logtraffic_start', 'mms_profile', 'name',
                   'nat', 'natinbound', 'natoutbound',
                   'np_acceleration', 'outbound', 'per_ip_shaper',
                   'policyid', 'poolname', 'profile_group',
                   'profile_protocol_options', 'profile_type', 'replacemsg_override_group',
                   'rsso', 'schedule', 'send_deny_packet',
                   'service', 'service_negate', 'session_ttl',
                   'spamfilter_profile', 'srcaddr', 'srcaddr_negate',
                   'srcintf', 'ssh_filter_profile', 'ssh_policy_redirect',
                   'ssl_mirror', 'ssl_mirror_intf', 'ssl_ssh_profile',
                   'status', 'tcp_mss_receiver', 'tcp_mss_sender',
                   'tcp_session_without_syn', 'timeout_send_rst', 'tos',
                   'tos_mask', 'tos_negate', 'traffic_shaper',
                   'traffic_shaper_reverse', 'url_category', 'users',
                   'utm_status', 'uuid', 'vlan_cos_fwd',
                   'vlan_cos_rev', 'vlan_filter', 'voip_profile',
                   'vpntunnel', 'waf_profile', 'webcache',
                   'webcache_https', 'webfilter_profile', 'webproxy_forward_server',
                   'webproxy_profile']

    # Dict comprehension replaces the original build-by-assignment loop.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys, turning underscores into hyphens.

    Ansible option names use underscores while the FortiGate API expects
    hyphens.  Lists are converted element by element (in place, matching the
    original behavior); dicts are rebuilt with converted keys; any other
    value is returned unchanged.
    """
    if isinstance(data, list):
        # Mutate the list in place, element by element.
        for index in range(len(data)):
            data[index] = underscore_to_hyphen(data[index])
    elif isinstance(data, dict):
        # Build a fresh dict with hyphenated keys and converted values.
        data = {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def firewall_policy6(data, fos, check_mode=False):
    """Create, update or delete a firewall policy6 object on the FortiGate.

    :param data: full module parameters; reads 'vdom', 'state' and the
        'firewall_policy6' sub-dict.
    :param fos: connection handler (FortiOSHandler) used to talk to the device.
    :param check_mode: when True, only predict whether a change would occur.
    :return: in check mode, a (failed, changed, diff_data) tuple; otherwise
        the raw API response dict from fos.set()/fos.delete().
    """
    vdom = data['vdom']
    state = data['state']
    firewall_policy6_data = data['firewall_policy6']
    # Drop unset options and convert key style for the FortiGate API.
    filtered_data = underscore_to_hyphen(filter_firewall_policy6_data(firewall_policy6_data))
    # check_mode starts from here
    if check_mode:
        # Look up the object's primary key and its current device-side state.
        mkey = fos.get_mkey('firewall', 'policy6', filtered_data, vdom=vdom)
        current_data = fos.get('firewall', 'policy6', vdom=vdom, mkey=mkey)
        # The record exists only if the GET succeeded and returned results.
        is_existed = current_data and current_data.get('http_status') == 200 \
            and isinstance(current_data.get('results'), list) \
            and len(current_data['results']) > 0
        # 2. if it exists and the state is 'present' then compare current settings with desired
        if state == 'present' or state is True:
            if mkey is None:
                # No primary key can be derived: treat as a pending creation.
                return False, True, filtered_data
            # if mkey exists then compare each other
            # record exists and they're matched or not
            if is_existed:
                is_same = is_same_comparison(
                    serialize(current_data['results'][0]), serialize(filtered_data))
                # changed is True only when desired differs from current.
                return False, not is_same, filtered_data
            # record does not exist
            return False, True, filtered_data
        if state == 'absent':
            if mkey is None:
                # Nothing identifiable to delete: no change.
                return False, False, filtered_data
            if is_existed:
                # Deleting an existing record would be a change.
                return False, True, filtered_data
            return False, False, filtered_data
        # Neither 'present' nor 'absent': report failure.
        return True, False, {'reason: ': 'Must provide state parameter'}
    if state == "present" or state is True:
        return fos.set('firewall',
                       'policy6',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('firewall',
                          'policy6',
                          mkey=filtered_data['policyid'],
                          vdom=vdom)
    else:
        fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
    """Return True if a FortiOS API response dict indicates success.

    Success means any of:
      * the 'status' field equals 'success', or
      * the HTTP status code is 200, or
      * the request was a DELETE that hit HTTP 404 (the target was already
        gone, treated as idempotent success).

    Uses dict.get() throughout: the original chained `'k' in resp and
    resp['k']` checks could raise KeyError when 'http_method' was 'DELETE'
    but 'http_status' was missing from the response.
    """
    return resp.get('status') == 'success' or \
        resp.get('http_status') == 200 or \
        (resp.get('http_method') == "DELETE" and resp.get('http_status') == 404)
def fortios_firewall(data, fos, check_mode):
    """Dispatch the firewall_policy6 task and normalize its result.

    Returns the raw (failed, changed, diff) tuple in check mode; otherwise
    a (is_error, has_changed, response) tuple derived from the API response.
    """
    fos.do_member_operation('firewall_policy6')
    if data['firewall_policy6']:
        resp = firewall_policy6(data, fos, check_mode)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('firewall_policy6'))
    if check_mode:
        return resp
    # Evaluate the status once and reuse it for both tuple members.
    succeeded = is_successful_status(resp)
    changed = succeeded and resp.get('revision_changed', True)
    return not succeeded, changed, resp
versioned_schema = {
"type": "list",
"children": {
"per_ip_shaper": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webproxy_forward_server": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dscp_match": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"diffserv_reverse": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"traffic_shaper_reverse": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"uuid": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vpntunnel": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dlp_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"custom_log_fields": {
"type": "list",
"children": {
"field_id": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"voip_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"np_acceleration": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fsso_groups": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"emailfilter_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"natoutbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"logtraffic": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "utm",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"spamfilter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"ssh_filter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vlan_cos_rev": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_session_without_syn": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "data-only",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"url_category": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"session_ttl": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"mms_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"poolname": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_ssh_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"comments": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"label": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"app_category": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_type": {
"type": "string",
"options": [
{
"value": "single",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "group",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"schedule": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffservcode_rev": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_mss_sender": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstintf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_mirror_intf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dscp_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"auto_asic_offload": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vlan_filter": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcintf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssh_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"anti_replay": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"action": {
"type": "string",
"options": [
{
"value": "accept",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "deny",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "ipsec",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tos_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"replacemsg_override_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"http_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"groups": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"icap_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"application_list": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"service_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"send_deny_packet": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ippool": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"service": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webproxy_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"tos": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dnsfilter_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"profile_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_mss_receiver": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"global_label": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"inbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcaddr": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tos_mask": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"logtraffic_start": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache_https": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ips_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"devices": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.2.3": True,
"v6.0.5": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"rsso": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"traffic_shaper": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffserv_forward": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"natinbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"cifs_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"vlan_cos_fwd": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fixedport": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dsri": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"application": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_mirror": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"nat": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"timeout_send_rst": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffservcode_forward": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"users": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dscp_value": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"utm_status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"waf_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"policyid": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"firewall_session_dirty": {
"type": "string",
"options": [
{
"value": "check-all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "check-new",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webfilter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"av_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"app_group": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_protocol_options": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"inspection_mode": {
"type": "string",
"options": [
{
"value": "proxy",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "flow",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
    """Entry point for the Ansible module.

    Builds the argument spec from the versioned schema, establishes the
    httpapi connection, performs a schema/version compatibility check and
    dispatches the requested state change for the firewall_policy6 endpoint.
    Exits via AnsibleModule.exit_json/fail_json; never returns normally.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # 'policyid' is the table mkey; it must be marked required inside the
    # firewall_policy6 suboptions below.
    mkeyname = 'policyid'
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        # NOTE: the type must be the string "bool", not the builtin bool.
        # A callable type would coerce "false"/"no" via bool(str) -> True.
        "enable_log": {"required": False, "type": "bool"},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "member_path": {"required": False, "type": "str"},
        "member_state": {
            "type": "str",
            "required": False,
            "choices": ["present", "absent"]
        },
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_policy6": {
            "required": False, "type": "dict", "default": None,
            "options": {
            }
        }
    }
    # Copy every schema-derived attribute into the suboptions, flagging the
    # table key (policyid) as required.
    for attribute_name in module_spec['options']:
        fields["firewall_policy6"]['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields["firewall_policy6"]['options'][attribute_name]['required'] = True

    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)

    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])

        # Logging defaults to off when the option is absent or None.
        if 'enable_log' in module.params:
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, "firewall_policy6")

        is_error, has_changed, result = fortios_firewall(module.params, fos, module.check_mode)
    else:
        module.fail_json(**FAIL_SOCKET_MSG)

    version_mismatch = (versions_check_result
                        and versions_check_result['matched'] is False)
    if version_mismatch:
        module.warn("Ansible has detected version mismatch between FortiOS system and your playbook, see more details by specifying option -vvv")

    if not is_error:
        if version_mismatch:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if version_mismatch:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)
# Standard script entry point: run the module only when executed directly,
# not when imported (e.g. by the Ansible module loader for inspection).
if __name__ == '__main__':
    main()
| 32.230059 | 144 | 0.32553 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_policy6
short_description: Configure IPv6 policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and policy6 category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
firewall_policy6:
description:
- Configure IPv6 policies.
default: null
type: dict
suboptions:
action:
description:
- Policy action (allow/deny/ipsec).
type: str
choices:
- accept
- deny
- ipsec
anti_replay:
description:
- Enable/disable anti-replay check.
type: str
choices:
- enable
- disable
app_category:
description:
- Application category ID list.
type: list
suboptions:
id:
description:
- Category IDs.
required: true
type: int
app_group:
description:
- Application group names.
type: list
suboptions:
name:
description:
- Application group names. Source application.group.name.
required: true
type: str
application:
description:
- Application ID list.
type: list
suboptions:
id:
description:
- Application IDs.
required: true
type: int
application_list:
description:
- Name of an existing Application list. Source application.list.name.
type: str
auto_asic_offload:
description:
- Enable/disable policy traffic ASIC offloading.
type: str
choices:
- enable
- disable
av_profile:
description:
- Name of an existing Antivirus profile. Source antivirus.profile.name.
type: str
cifs_profile:
description:
- Name of an existing CIFS profile. Source cifs.profile.name.
type: str
comments:
description:
- Comment.
type: str
custom_log_fields:
description:
- Log field index numbers to append custom log fields to log messages for this policy.
type: list
suboptions:
field_id:
description:
- Custom log field. Source log.custom-field.id.
type: str
devices:
description:
- Names of devices or device groups that can be matched by the policy.
type: list
suboptions:
name:
description:
- Device or group name. Source user.device.alias user.device-group.name user.device-category.name.
required: true
type: str
diffserv_forward:
description:
- Enable to change packet"s DiffServ values to the specified diffservcode-forward value.
type: str
choices:
- enable
- disable
diffserv_reverse:
description:
- Enable to change packet"s reverse (reply) DiffServ values to the specified diffservcode-rev value.
type: str
choices:
- enable
- disable
diffservcode_forward:
description:
- Change packet"s DiffServ to this value.
type: str
diffservcode_rev:
description:
- Change packet"s reverse (reply) DiffServ to this value.
type: str
dlp_sensor:
description:
- Name of an existing DLP sensor. Source dlp.sensor.name.
type: str
dnsfilter_profile:
description:
- Name of an existing DNS filter profile. Source dnsfilter.profile.name.
type: str
dscp_match:
description:
- Enable DSCP check.
type: str
choices:
- enable
- disable
dscp_negate:
description:
- Enable negated DSCP match.
type: str
choices:
- enable
- disable
dscp_value:
description:
- DSCP value.
type: str
dsri:
description:
- Enable DSRI to ignore HTTP server responses.
type: str
choices:
- enable
- disable
dstaddr:
description:
- Destination address and address group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name.
required: true
type: str
dstaddr_negate:
description:
- When enabled dstaddr specifies what the destination address must NOT be.
type: str
choices:
- enable
- disable
dstintf:
description:
- Outgoing (egress) interface.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
type: str
emailfilter_profile:
description:
- Name of an existing email filter profile. Source emailfilter.profile.name.
type: str
firewall_session_dirty:
description:
- How to handle sessions if the configuration of this firewall policy changes.
type: str
choices:
- check-all
- check-new
fixedport:
description:
- Enable to prevent source NAT from changing a session"s source port.
type: str
choices:
- enable
- disable
fsso_groups:
description:
- Names of FSSO groups.
type: list
suboptions:
name:
description:
- Names of FSSO groups. Source user.adgrp.name.
required: true
type: str
global_label:
description:
- Label for the policy that appears when the GUI is in Global View mode.
type: str
groups:
description:
- Names of user groups that can authenticate with this policy.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
http_policy_redirect:
description:
- Redirect HTTP(S) traffic to matching transparent web proxy policy.
type: str
choices:
- enable
- disable
icap_profile:
description:
- Name of an existing ICAP profile. Source icap.profile.name.
type: str
inbound:
description:
- 'Policy-based IPsec VPN: only traffic from the remote network can initiate a VPN.'
type: str
choices:
- enable
- disable
inspection_mode:
description:
- Policy inspection mode (Flow/proxy). Default is Flow mode.
type: str
choices:
- proxy
- flow
ippool:
description:
- Enable to use IP Pools for source NAT.
type: str
choices:
- enable
- disable
ips_sensor:
description:
- Name of an existing IPS sensor. Source ips.sensor.name.
type: str
label:
description:
- Label for the policy that appears when the GUI is in Section View mode.
type: str
logtraffic:
description:
- Enable or disable logging. Log all sessions or security profile sessions.
type: str
choices:
- all
- utm
- disable
logtraffic_start:
description:
- Record logs when a session starts and ends.
type: str
choices:
- enable
- disable
mms_profile:
description:
- Name of an existing MMS profile. Source firewall.mms-profile.name.
type: str
name:
description:
- Policy name.
type: str
nat:
description:
- Enable/disable source NAT.
type: str
choices:
- enable
- disable
natinbound:
description:
- 'Policy-based IPsec VPN: apply destination NAT to inbound traffic.'
type: str
choices:
- enable
- disable
natoutbound:
description:
- 'Policy-based IPsec VPN: apply source NAT to outbound traffic.'
type: str
choices:
- enable
- disable
np_acceleration:
description:
- Enable/disable UTM Network Processor acceleration.
type: str
choices:
- enable
- disable
outbound:
description:
- 'Policy-based IPsec VPN: only traffic from the internal network can initiate a VPN.'
type: str
choices:
- enable
- disable
per_ip_shaper:
description:
- Per-IP traffic shaper. Source firewall.shaper.per-ip-shaper.name.
type: str
policyid:
description:
- Policy ID.
required: true
type: int
poolname:
description:
- IP Pool names.
type: list
suboptions:
name:
description:
- IP pool name. Source firewall.ippool6.name.
required: true
type: str
profile_group:
description:
- Name of profile group. Source firewall.profile-group.name.
type: str
profile_protocol_options:
description:
- Name of an existing Protocol options profile. Source firewall.profile-protocol-options.name.
type: str
profile_type:
description:
- Determine whether the firewall policy allows security profile groups or single profiles only.
type: str
choices:
- single
- group
replacemsg_override_group:
description:
- Override the default replacement message group for this policy. Source system.replacemsg-group.name.
type: str
rsso:
description:
- Enable/disable RADIUS single sign-on (RSSO).
type: str
choices:
- enable
- disable
schedule:
description:
- Schedule name. Source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name.
type: str
send_deny_packet:
description:
- Enable/disable return of deny-packet.
type: str
choices:
- enable
- disable
service:
description:
- Service and service group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
service_negate:
description:
- When enabled service specifies what the service must NOT be.
type: str
choices:
- enable
- disable
session_ttl:
description:
- Session TTL in seconds for sessions accepted by this policy. 0 means use the system default session TTL.
type: int
spamfilter_profile:
description:
- Name of an existing Spam filter profile. Source spamfilter.profile.name.
type: str
srcaddr:
description:
- Source address and address group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
srcaddr_negate:
description:
- When enabled srcaddr specifies what the source address must NOT be.
type: str
choices:
- enable
- disable
srcintf:
description:
- Incoming (ingress) interface.
type: list
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
type: str
ssh_filter_profile:
description:
- Name of an existing SSH filter profile. Source ssh-filter.profile.name.
type: str
ssh_policy_redirect:
description:
- Redirect SSH traffic to matching transparent proxy policy.
type: str
choices:
- enable
- disable
ssl_mirror:
description:
- Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).
type: str
choices:
- enable
- disable
ssl_mirror_intf:
description:
- SSL mirror interface name.
type: list
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
type: str
ssl_ssh_profile:
description:
- Name of an existing SSL SSH profile. Source firewall.ssl-ssh-profile.name.
type: str
status:
description:
- Enable or disable this policy.
type: str
choices:
- enable
- disable
tcp_mss_receiver:
description:
- Receiver TCP maximum segment size (MSS).
type: int
tcp_mss_sender:
description:
- Sender TCP maximum segment size (MSS).
type: int
tcp_session_without_syn:
description:
- Enable/disable creation of TCP session without SYN flag.
type: str
choices:
- all
- data-only
- disable
timeout_send_rst:
description:
- Enable/disable sending RST packets when TCP sessions expire.
type: str
choices:
- enable
- disable
tos:
description:
- ToS (Type of Service) value used for comparison.
type: str
tos_mask:
description:
- Non-zero bit positions are used for comparison while zero bit positions are ignored.
type: str
tos_negate:
description:
- Enable negated TOS match.
type: str
choices:
- enable
- disable
traffic_shaper:
description:
            - Traffic shaper. Source firewall.shaper.traffic-shaper.name.
type: str
traffic_shaper_reverse:
description:
- Reverse traffic shaper. Source firewall.shaper.traffic-shaper.name.
type: str
url_category:
description:
- URL category ID list.
type: list
suboptions:
id:
description:
- URL category ID.
required: true
type: int
users:
description:
- Names of individual users that can authenticate with this policy.
type: list
suboptions:
name:
description:
- Names of individual users that can authenticate with this policy. Source user.local.name.
required: true
type: str
utm_status:
description:
- Enable AV/web/ips protection profile.
type: str
choices:
- enable
- disable
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
vlan_cos_fwd:
description:
- 'VLAN forward direction user priority: 255 passthrough, 0 lowest, 7 highest'
type: int
vlan_cos_rev:
description:
- 'VLAN reverse direction user priority: 255 passthrough, 0 lowest, 7 highest'
type: int
vlan_filter:
description:
- Set VLAN filters.
type: str
voip_profile:
description:
- Name of an existing VoIP profile. Source voip.profile.name.
type: str
vpntunnel:
description:
- 'Policy-based IPsec VPN: name of the IPsec VPN Phase 1. Source vpn.ipsec.phase1.name vpn.ipsec.manualkey.name.'
type: str
waf_profile:
description:
- Name of an existing Web application firewall profile. Source waf.profile.name.
type: str
webcache:
description:
- Enable/disable web cache.
type: str
choices:
- enable
- disable
webcache_https:
description:
- Enable/disable web cache for HTTPS.
type: str
choices:
- disable
- enable
webfilter_profile:
description:
- Name of an existing Web filter profile. Source webfilter.profile.name.
type: str
webproxy_forward_server:
description:
- Web proxy forward server name. Source web-proxy.forward-server.name web-proxy.forward-server-group.name.
type: str
webproxy_profile:
description:
- Webproxy profile name. Source web-proxy.profile.name.
type: str
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_firewall_policy6
fortios_firewall_policy6:
vdom: root
state: present
firewall_policy6:
action: deny
anti_replay: enable
auto_asic_offload: enable
diffserv_forward: disable
diffserv_reverse: disable
diffservcode_forward: '000000'
diffservcode_rev: '000000'
dsri: disable
dstaddr:
- name: all
dstaddr_negate: disable
dstintf:
- name: port3
firewall_session_dirty: check-all
fixedport: disable
http_policy_redirect: disable
inbound: disable
inspection_mode: flow
ippool: disable
logtraffic: disable
logtraffic_start: disable
name: policy6p1
nat: disable
natinbound: disable
natoutbound: disable
outbound: disable
policyid: 1
profile_type: single
rsso: disable
schedule: always
send_deny_packet: disable
service:
- name: ALL
service_negate: disable
srcaddr:
- name: all
srcaddr_negate: disable
srcintf:
- name: port4
ssh_policy_redirect: disable
ssl_mirror: disable
status: enable
tcp_mss_receiver: 0
tcp_mss_sender: 0
tcp_session_without_syn: disable
timeout_send_rst: disable
tos: '0x00'
tos_mask: '0x00'
tos_negate: disable
utm_status: disable
vlan_cos_fwd: 0
vlan_cos_rev: 0
webcache: disable
webcache_https: disable
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_firewall_policy6_data(json):
    """Pick out the supported firewall policy6 attributes from *json*.

    Keys that are not in the supported option list, and keys whose value
    is None, are dropped.  A new dictionary is returned; *json* itself is
    not modified.
    """
    option_list = [
        'action', 'anti_replay', 'app_category', 'app_group', 'application',
        'application_list', 'auto_asic_offload', 'av_profile', 'cifs_profile',
        'comments', 'custom_log_fields', 'devices', 'diffserv_forward',
        'diffserv_reverse', 'diffservcode_forward', 'diffservcode_rev',
        'dlp_sensor', 'dnsfilter_profile', 'dscp_match', 'dscp_negate',
        'dscp_value', 'dsri', 'dstaddr', 'dstaddr_negate', 'dstintf',
        'emailfilter_profile', 'firewall_session_dirty', 'fixedport',
        'fsso_groups', 'global_label', 'groups', 'http_policy_redirect',
        'icap_profile', 'inbound', 'inspection_mode', 'ippool', 'ips_sensor',
        'label', 'logtraffic', 'logtraffic_start', 'mms_profile', 'name',
        'nat', 'natinbound', 'natoutbound', 'np_acceleration', 'outbound',
        'per_ip_shaper', 'policyid', 'poolname', 'profile_group',
        'profile_protocol_options', 'profile_type', 'replacemsg_override_group',
        'rsso', 'schedule', 'send_deny_packet', 'service', 'service_negate',
        'session_ttl', 'spamfilter_profile', 'srcaddr', 'srcaddr_negate',
        'srcintf', 'ssh_filter_profile', 'ssh_policy_redirect', 'ssl_mirror',
        'ssl_mirror_intf', 'ssl_ssh_profile', 'status', 'tcp_mss_receiver',
        'tcp_mss_sender', 'tcp_session_without_syn', 'timeout_send_rst',
        'tos', 'tos_mask', 'tos_negate', 'traffic_shaper',
        'traffic_shaper_reverse', 'url_category', 'users', 'utm_status',
        'uuid', 'vlan_cos_fwd', 'vlan_cos_rev', 'vlan_filter', 'voip_profile',
        'vpntunnel', 'waf_profile', 'webcache', 'webcache_https',
        'webfilter_profile', 'webproxy_forward_server', 'webproxy_profile']
    # Keep only recognised, non-None attributes (iteration order of
    # option_list fixes the key order of the result).
    return {
        attribute: json[attribute]
        for attribute in option_list
        if attribute in json and json[attribute] is not None
    }
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys, replacing underscores with hyphens.

    Dictionaries are rebuilt as new objects; lists are converted in place
    (the same list object is returned, with its elements converted).
    Scalars and other types are returned unchanged.
    """
    if isinstance(data, dict):
        return {
            key.replace('_', '-'): underscore_to_hyphen(value)
            for key, value in data.items()
        }
    if isinstance(data, list):
        # Mutate the list in place, mirroring the original in-place semantics.
        for index in range(len(data)):
            data[index] = underscore_to_hyphen(data[index])
        return data
    return data
def firewall_policy6(data, fos, check_mode=False):
    """Create, update or delete an IPv6 firewall policy via the FortiOS API.

    :param data: task parameters; must contain 'vdom', 'state' and the
        'firewall_policy6' option dict.
    :param fos: FortiOSHandler-like connection object used for API calls.
    :param check_mode: when True, no changes are made; instead the function
        predicts whether a change would occur.
    :returns: in check mode, a tuple (failed, changed, filtered_data);
        otherwise the raw API response dict from fos.set()/fos.delete().
    """
    vdom = data['vdom']
    state = data['state']
    firewall_policy6_data = data['firewall_policy6']
    # Drop unsupported/None options, then convert key underscores to the
    # hyphenated form the FortiOS REST API expects.
    filtered_data = underscore_to_hyphen(filter_firewall_policy6_data(firewall_policy6_data))
    # check_mode starts from here
    if check_mode:
        # Look up the policy's primary key and its current configuration.
        mkey = fos.get_mkey('firewall', 'policy6', filtered_data, vdom=vdom)
        current_data = fos.get('firewall', 'policy6', vdom=vdom, mkey=mkey)
        # The record exists only if the GET succeeded and returned results.
        is_existed = current_data and current_data.get('http_status') == 200 \
            and isinstance(current_data.get('results'), list) \
            and len(current_data['results']) > 0
        # 2. if it exists and the state is 'present' then compare current settings with desired
        if state == 'present' or state is True:
            if mkey is None:
                return False, True, filtered_data
            # if mkey exists then compare each other
            # record exists and they're matched or not
            if is_existed:
                is_same = is_same_comparison(
                    serialize(current_data['results'][0]), serialize(filtered_data))
                return False, not is_same, filtered_data
            # record does not exist
            return False, True, filtered_data
        if state == 'absent':
            if mkey is None:
                return False, False, filtered_data
            if is_existed:
                return False, True, filtered_data
            return False, False, filtered_data
        return True, False, {'reason: ': 'Must provide state parameter'}
    # Not check mode: actually apply the change on the device.
    if state == "present" or state is True:
        return fos.set('firewall',
                       'policy6',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('firewall',
                          'policy6',
                          mkey=filtered_data['policyid'],
                          vdom=vdom)
    else:
        fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
    """Return True when a FortiOS API response indicates success.

    A response is considered successful when its 'status' field is
    'success', its 'http_status' is 200, or it is a DELETE whose target
    was already gone (HTTP 404).

    Uses dict.get() throughout: the original expression indexed
    resp['http_status'] in the DELETE branch and raised KeyError when
    'http_method' was present but 'http_status' was missing.
    """
    return (
        resp.get('status') == 'success'
        or resp.get('http_status') == 200
        or (resp.get('http_method') == 'DELETE' and resp.get('http_status') == 404)
    )
def fortios_firewall(data, fos, check_mode):
    """Dispatch the firewall_policy6 task and normalize its result.

    :param data: task parameters containing the 'firewall_policy6' body.
    :param fos: FortiOSHandler-like connection object.
    :param check_mode: when True the prediction tuple from
        firewall_policy6() is returned unmodified.
    :returns: a tuple (failed, changed, response). 'changed' honours the
        API's 'revision_changed' flag when present, otherwise any
        successful call counts as a change.
    """
    fos.do_member_operation('firewall_policy6')
    if data['firewall_policy6']:
        resp = firewall_policy6(data, fos, check_mode)
    else:
        # fail_json raises, so resp is never read on this path.
        fos._module.fail_json(msg='missing task body: %s' % ('firewall_policy6'))
    if check_mode:
        return resp
    return not is_successful_status(resp), \
        is_successful_status(resp) and \
        (resp['revision_changed'] if 'revision_changed' in resp else True), \
        resp
versioned_schema = {
"type": "list",
"children": {
"per_ip_shaper": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webproxy_forward_server": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dscp_match": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"diffserv_reverse": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"traffic_shaper_reverse": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"uuid": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vpntunnel": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dlp_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"custom_log_fields": {
"type": "list",
"children": {
"field_id": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"voip_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"np_acceleration": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fsso_groups": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"emailfilter_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"natoutbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"logtraffic": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "utm",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"spamfilter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"ssh_filter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vlan_cos_rev": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_session_without_syn": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "data-only",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"url_category": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"session_ttl": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"mms_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"poolname": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_ssh_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"comments": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"label": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"app_category": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_type": {
"type": "string",
"options": [
{
"value": "single",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "group",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"schedule": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffservcode_rev": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_mss_sender": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstintf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_mirror_intf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dscp_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"auto_asic_offload": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vlan_filter": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcintf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssh_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"anti_replay": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"action": {
"type": "string",
"options": [
{
"value": "accept",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "deny",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "ipsec",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tos_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"replacemsg_override_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"http_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"groups": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"icap_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"application_list": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"service_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"send_deny_packet": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ippool": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"service": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webproxy_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"tos": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dnsfilter_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"profile_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_mss_receiver": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"global_label": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"inbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcaddr": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tos_mask": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"logtraffic_start": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache_https": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ips_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"devices": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.2.3": True,
"v6.0.5": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"rsso": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"traffic_shaper": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffserv_forward": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"natinbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"cifs_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"vlan_cos_fwd": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fixedport": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dsri": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"application": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_mirror": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"nat": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"timeout_send_rst": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffservcode_forward": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"users": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dscp_value": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"utm_status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"waf_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"policyid": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"firewall_session_dirty": {
"type": "string",
"options": [
{
"value": "check-all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "check-new",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webfilter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"av_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"app_group": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_protocol_options": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"inspection_mode": {
"type": "string",
"options": [
{
"value": "proxy",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "flow",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
    """Module entry point.

    Builds the Ansible argument spec from ``versioned_schema``, connects to
    the FortiOS device over the httpapi socket, applies the requested
    firewall_policy6 state and exits the module with the result.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = 'policyid'  # primary key attribute of the firewall.policy6 table
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        # FIX: the argument type must be the string "bool", not the bool
        # builtin. Ansible treats a callable type as the conversion function,
        # and bool("false") is True, so string values were coerced wrongly.
        "enable_log": {"required": False, "type": "bool"},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "member_path": {"required": False, "type": "str"},
        "member_state": {
            "type": "str",
            "required": False,
            "choices": ["present", "absent"]
        },
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_policy6": {
            "required": False, "type": "dict", "default": None,
            "options": {
            }
        }
    }
    # Copy every schema-derived attribute into the task's option spec; the
    # mkey (primary key) attribute is mandatory for the task.
    for attribute_name in module_spec['options']:
        fields["firewall_policy6"]['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields["firewall_policy6"]['options'][attribute_name]['required'] = True

    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)

    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        if 'enable_log' in module.params:
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            # No logging unless explicitly requested.
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        # Warn (below) when the playbook uses attributes unsupported by the
        # connected FortiOS version.
        versions_check_result = check_schema_versioning(fos, versioned_schema, "firewall_policy6")
        is_error, has_changed, result = fortios_firewall(module.params, fos, module.check_mode)
    else:
        # No httpapi socket: the module cannot reach the device at all.
        module.fail_json(**FAIL_SOCKET_MSG)

    if versions_check_result and versions_check_result['matched'] is False:
        module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")

    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| true | true |
f724ef7c58e53166da599152abac034e13800121 | 368 | py | Python | copyspecial/my_test.py | rayedbar/google_python_exercises | 9b0903ab9acd91ca82d9568725139cfbb43edae6 | [
"Apache-2.0"
] | null | null | null | copyspecial/my_test.py | rayedbar/google_python_exercises | 9b0903ab9acd91ca82d9568725139cfbb43edae6 | [
"Apache-2.0"
] | null | null | null | copyspecial/my_test.py | rayedbar/google_python_exercises | 9b0903ab9acd91ca82d9568725139cfbb43edae6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys
import os
import subprocess
filename = 'haha.txt'
try:
f = open(filename, 'rU')
text = f.read()
f.close()
except IOError:
## Control jumps directly to here if any of the above lines throws IOError.
sys.stderr.write('problem reading:' + filename)
## In any case, the code then continues with the line after the try/except
| 21.647059 | 79 | 0.701087 |
import sys
import os
import subprocess
filename = 'haha.txt'
try:
f = open(filename, 'rU')
text = f.read()
f.close()
except IOError:
| true | true |
f724efb25c23769a08c939a3c661b1d41864648b | 7,801 | py | Python | laser-chess-backend/app/core/routers/users.py | tojatos/laser-tactics | 538bef7ab03bf35c0ef27e195001f6f7f12c1ba4 | [
"MIT"
] | 2 | 2021-12-12T03:45:18.000Z | 2021-12-21T03:53:23.000Z | laser-chess-backend/app/core/routers/users.py | tojatos/laser-tactics | 538bef7ab03bf35c0ef27e195001f6f7f12c1ba4 | [
"MIT"
] | 1 | 2022-03-26T15:13:29.000Z | 2022-03-26T15:13:29.000Z | laser-chess-backend/app/core/routers/users.py | tojatos/laser-tactics | 538bef7ab03bf35c0ef27e195001f6f7f12c1ba4 | [
"MIT"
] | null | null | null | from fastapi import Depends, HTTPException
from fastapi import status, APIRouter
from jose import JWTError, jwt
from sqlalchemy.orm import Session
from app.core.dependecies import get_db, SECRET_KEY, ALGORITHM, TokenPurpose, get_current_active_user, get_current_user, \
verify_password
from app.core.internal import schemas, crud
from app.game_engine.models import *
# Shared router for all user-related endpoints; every route below is mounted
# under /users and the responses map documents the common error payloads.
router = APIRouter(
    prefix="/users",
    tags=["users"],
    responses={404: {"error": "Not found"}, 422: {"error": "Invalid input data"}},
)
# TODO: test
@router.post("/verify/{token}")
def verify_user(token: str, db: Session = Depends(get_db)):
    # Confirm an account from an emailed verification token. Any decoding
    # failure, wrong token purpose or unknown e-mail yields the same generic
    # 400 so the endpoint leaks nothing about which check failed.
    invalid_link = HTTPException(
        status_code=400, detail="The verification link is invalid or has expired.")
    try:
        claims = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
    except JWTError:
        raise invalid_link
    email = claims.get("sub")
    purpose = claims.get("purpose")
    if email is None or purpose != TokenPurpose.ACCOUNT_VERIFICATION:
        raise invalid_link
    token_data = schemas.VerificationTokenData(
        email=email, purpose=purpose, hash=claims.get("hash"))
    user = crud.get_user_by_email(db, token_data.email)
    if user is None:
        raise invalid_link
    if user.is_verified:
        # Deliberate 200-status exception: the link worked, nothing to do.
        raise HTTPException(status_code=200, detail='Account already confirmed. Please login.')
    crud.verify_user(user=user, db=db)
    return {"detail": "Account verified successfully"}
# TODO: test
@router.post("/change_password")
def change_password(change_password_schema: schemas.EmergencyChangePasswordSchema, db: Session = Depends(get_db)):
    # Password reset via an emailed token. The token embeds the current
    # password hash, which makes it effectively single-use: once the password
    # changes, the embedded hash no longer matches and the token is rejected.
    credentials_error = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
    try:
        claims = jwt.decode(change_password_schema.token, SECRET_KEY, algorithms=[ALGORITHM])
    except JWTError:
        raise credentials_error
    username = claims.get("sub")
    purpose = claims.get("purpose")
    embedded_hash = claims.get("hash")
    if username is None or purpose != TokenPurpose.CHANGE_PASSWORD:
        raise credentials_error
    token_data = schemas.TokenData(username=username, purpose=purpose, hash=embedded_hash)
    user = crud.get_user(db, token_data.username)
    if user is None or user.hashed_password != embedded_hash:
        raise credentials_error
    return crud.change_password(user, change_password_schema.newPassword, db)
@router.post("", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
    # Register a new user; reject duplicates on e-mail first, then username.
    if crud.get_user_by_email(db, email=user.email):
        raise HTTPException(status_code=400, detail="Email already registered")
    if crud.get_user(db, username=user.username):
        raise HTTPException(status_code=400, detail="This name is taken")
    return crud.create_user(db=db, user=user)
@router.get("", response_model=List[schemas.UserGet])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    # Paginated listing of all users.
    return crud.get_users(db, skip=skip, limit=limit)
@router.get("/{username}", response_model=schemas.UserGet)
def read_user(username: str, db: Session = Depends(get_db)):
    # Fetch a single user by username; 404 when unknown.
    found = crud.get_user(db, username=username)
    if found is None:
        raise HTTPException(status_code=404, detail="User not found")
    return found
@router.get("/me/blocked", response_model=List[str])
async def get_users_blocked(current_user: schemas.User = Depends(get_current_active_user),
                            db: Session = Depends(get_db)):
    # Usernames the authenticated user has blocked.
    blocked_names = crud.get_blocked_users(user=current_user, db=db)
    return blocked_names
@router.post("/me/block", response_model=schemas.BlockedUsers)
async def block_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user),
                     db: Session = Depends(get_db)):
    # Block another user. Check order: unknown target -> 404,
    # self-block -> 403, already blocked -> 403.
    target = crud.get_user(username=usernameSchema.username, db=db)
    if not target:
        raise HTTPException(status_code=404, detail="User not found")
    already_blocked = crud.get_blocked_users(current_user, db)
    if target.username == current_user.username:
        raise HTTPException(status_code=403, detail="Cannot block yourself")
    if usernameSchema.username in already_blocked:
        raise HTTPException(status_code=403, detail="User already blocked")
    return crud.create_block_record(user=current_user, user_to_block=target, db=db)
# TODO: test
@router.delete("/me/unblock")
async def unblock_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user),
                       db: Session = Depends(get_db)):
    # Remove an existing block record: 404 for an unknown user,
    # 403 when that user was never blocked by the caller.
    target = crud.get_user(username=usernameSchema.username, db=db)
    blocked_names = crud.get_blocked_users(user=current_user, db=db)
    if not target:
        raise HTTPException(status_code=404, detail="User not found")
    if target.username not in blocked_names:
        raise HTTPException(status_code=403, detail="User not blocked")
    return crud.remove_block_record(user=current_user, blocked_user=target, db=db)
@router.get("/me/info", response_model=schemas.User)
async def read_users_me(current_user: schemas.User = Depends(get_current_active_user)):
    # Return the authenticated user's own record (auth handled by dependency).
    return current_user
@router.post("/me/change_password")
def change_password(change_password_schema: schemas.ChangePasswordSchema,
                    current_user: schemas.User = Depends(get_current_user), db: Session = Depends(get_db)):
    # Authenticated password change: the stored hash is re-fetched and the
    # old password must verify against it before the new one is set.
    stored_user = crud.get_user(db=db, username=current_user.username)
    if not verify_password(change_password_schema.oldPassword, stored_user.hashed_password):
        raise HTTPException(status_code=401, detail="Invalid old password")
    return crud.change_password(user=current_user, new_password=change_password_schema.newPassword, db=db)
@router.get("/{username}/history", response_model=List[schemas.GameHistoryEntry])
def get_users_game_history(username: str, db: Session = Depends(get_db)):
    # Last 20 matches of the given user (see crud.get_last_20_matches);
    # 404 when the username is unknown.
    player = crud.get_user(db, username=username)
    if player is None:
        raise HTTPException(status_code=404, detail="User not found")
    return crud.get_last_20_matches(db=db, user=player)
@router.get("/{username}/stats", response_model=schemas.Stats)
def get_stats(username: str, db: Session = Depends(get_db)):
    # Aggregate statistics for one user; 404 when the username is unknown.
    player = crud.get_user(db, username=username)
    if player is None:
        raise HTTPException(status_code=404, detail="User not found")
    return crud.get_stats(db=db, user=player)
@router.get("/me/settings", response_model=schemas.Settings)
def get_settings(current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)):
    # Per-user settings of the authenticated user.
    user_settings = crud.get_settings(db=db, user=current_user)
    return user_settings
@router.patch("/me/settings", response_model=schemas.Settings)
def update_settings(settings: schemas.Settings, current_user: schemas.User = Depends(get_current_active_user),
                    db: Session = Depends(get_db)):
    # Partial update of the authenticated user's settings.
    updated = crud.update_settings(settings=settings, db=db, user=current_user)
    return updated
@router.get("/ranking/top", response_model=List[schemas.UserGet])
def get_top_ranked(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    # Users ordered by rating, paginated.
    return crud.get_users_by_rating(db, skip=skip, limit=limit)
| 46.159763 | 122 | 0.735419 | from fastapi import Depends, HTTPException
from fastapi import status, APIRouter
from jose import JWTError, jwt
from sqlalchemy.orm import Session
from app.core.dependecies import get_db, SECRET_KEY, ALGORITHM, TokenPurpose, get_current_active_user, get_current_user, \
verify_password
from app.core.internal import schemas, crud
from app.game_engine.models import *
router = APIRouter(
prefix="/users",
tags=["users"],
responses={404: {"error": "Not found"}, 422: {"error": "Invalid input data"}},
)
@router.post("/verify/{token}")
def verify_user(token: str, db: Session = Depends(get_db)):
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
email: str = payload.get("sub")
purpose = payload.get("purpose")
is_verifed = payload.get("hash")
if email is None or purpose != TokenPurpose.ACCOUNT_VERIFICATION:
raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
token_data = schemas.VerificationTokenData(email=email, purpose=purpose, hash=is_verifed)
except JWTError:
raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
user = crud.get_user_by_email(db, token_data.email)
if user is None:
raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
if user.is_verified:
raise HTTPException(status_code=200, detail='Account already confirmed. Please login.')
else:
crud.verify_user(user=user, db=db)
return {"detail": "Account verified successfully"}
@router.post("/change_password")
def change_password(change_password_schema: schemas.EmergencyChangePasswordSchema, db: Session = Depends(get_db)):
try:
token = change_password_schema.token
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
purpose = payload.get("purpose")
hash = payload.get("hash")
if username is None or purpose != TokenPurpose.CHANGE_PASSWORD:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
token_data = schemas.TokenData(username=username, purpose=purpose, hash=hash)
except JWTError:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
user = crud.get_user(db, token_data.username)
if user is None:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
if user.hashed_password != hash:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
return crud.change_password(user, change_password_schema.newPassword, db)
@router.post("", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
db_user_1 = crud.get_user(db, username=user.username)
if db_user_1:
raise HTTPException(status_code=400, detail="This name is taken")
return crud.create_user(db=db, user=user)
@router.get("", response_model=List[schemas.UserGet])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users(db, skip=skip, limit=limit)
return users
@router.get("/{username}", response_model=schemas.UserGet)
def read_user(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@router.get("/me/blocked", response_model=List[str])
async def get_users_blocked(current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
return crud.get_blocked_users(user=current_user, db=db)
@router.post("/me/block", response_model=schemas.BlockedUsers)
async def block_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
username = usernameSchema.username
user_to_block = crud.get_user(username=username, db=db)
if not user_to_block:
raise HTTPException(status_code=404, detail="User not found")
blocked = crud.get_blocked_users(current_user, db)
if user_to_block.username == current_user.username:
raise HTTPException(status_code=403, detail="Cannot block yourself")
if username in blocked:
raise HTTPException(status_code=403, detail="User already blocked")
return crud.create_block_record(user=current_user, user_to_block=user_to_block, db=db)
@router.delete("/me/unblock")
async def unblock_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
username = usernameSchema.username
user_to_unblock = crud.get_user(username=username, db=db)
blocked = crud.get_blocked_users(user=current_user, db=db)
if not user_to_unblock:
raise HTTPException(status_code=404, detail="User not found")
if user_to_unblock.username not in blocked:
raise HTTPException(status_code=403, detail="User not blocked")
return crud.remove_block_record(user=current_user, blocked_user=user_to_unblock, db=db)
@router.get("/me/info", response_model=schemas.User)
async def read_users_me(current_user: schemas.User = Depends(get_current_active_user)):
return current_user
@router.post("/me/change_password")
def change_password(change_password_schema: schemas.ChangePasswordSchema,
current_user: schemas.User = Depends(get_current_user), db: Session = Depends(get_db)):
db_user = crud.get_user(db=db, username=current_user.username)
if not verify_password(change_password_schema.oldPassword, db_user.hashed_password):
raise HTTPException(status_code=401, detail="Invalid old password")
return crud.change_password(user=current_user, new_password=change_password_schema.newPassword, db=db)
@router.get("/{username}/history", response_model=List[schemas.GameHistoryEntry])
def get_users_game_history(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
history = crud.get_last_20_matches(db=db, user=db_user)
return history
@router.get("/{username}/stats", response_model=schemas.Stats)
def get_stats(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return crud.get_stats(db=db, user=db_user)
@router.get("/me/settings", response_model=schemas.Settings)
def get_settings(current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)):
return crud.get_settings(db=db, user=current_user)
@router.patch("/me/settings", response_model=schemas.Settings)
def update_settings(settings: schemas.Settings, current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
return crud.update_settings(settings=settings, db=db, user=current_user)
@router.get("/ranking/top", response_model=List[schemas.UserGet])
def get_top_ranked(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users_by_rating(db, skip=skip, limit=limit)
return users
| true | true |
f724f1291e5caf124dff577988cb066ae98c82f0 | 22,034 | py | Python | tests/gcp/hooks/test_bigtable.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2019-09-19T15:22:15.000Z | 2019-09-19T15:22:15.000Z | tests/gcp/hooks/test_bigtable.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2017-05-11T22:57:49.000Z | 2017-05-11T22:57:49.000Z | tests/gcp/hooks/test_bigtable.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-11-16T09:03:58.000Z | 2020-11-16T09:03:58.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import google
from google.cloud.bigtable import Client
from google.cloud.bigtable.instance import Instance
from tests.contrib.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id, \
mock_base_gcp_hook_default_project_id, GCP_PROJECT_ID_HOOK_UNIT_TEST
from tests.compat import mock, PropertyMock
from airflow import AirflowException
from airflow.gcp.hooks.bigtable import BigtableHook
# Fixture identifiers shared by the Bigtable hook tests below.
CBT_INSTANCE = 'instance'
CBT_CLUSTER = 'cluster'
CBT_ZONE = 'zone'
CBT_TABLE = 'table'
class TestBigtableHookNoDefaultProjectId(unittest.TestCase):
    def setUp(self):
        # Build the hook with GoogleCloudBaseHook.__init__ replaced by a stub
        # that leaves the default project id unset, so every hook call in this
        # class must supply project_id explicitly or fail.
        with mock.patch('airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__',
                        new=mock_base_gcp_hook_no_default_project_id):
            self.bigtable_hook_no_default_project_id = BigtableHook(gcp_conn_id='test')
    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook.client_info", new_callable=mock.PropertyMock)
    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook._get_credentials")
    @mock.patch("airflow.gcp.hooks.bigtable.Client")
    def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        # _get_client must build an admin Client with the hook's credentials
        # and client_info, return it, and cache it on the hook instance.
        # NOTE: patch decorators apply bottom-up, hence the parameter order
        # (Client, _get_credentials, client_info).
        result = self.bigtable_hook_no_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=mock_client_info.return_value,
            admin=True
        )
        self.assertEqual(mock_client.return_value, result)
        self.assertEqual(self.bigtable_hook_no_default_project_id._client, result)
    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_missing_project_id(self, get_client, mock_project_id):
        # With neither a default nor an explicit project id, get_instance must
        # raise AirflowException before touching the Bigtable client at all.
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.get_instance(instance_id=CBT_INSTANCE)
        instance_exists_method.assert_not_called()
        instance_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_overridden_project_id(self, get_client):
        # An explicitly passed project_id must be forwarded to _get_client and
        # the instance existence check performed exactly once.
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_no_default_project_id.get_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNotNone(res)
    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_missing_project_id(self, get_client, mock_project_id):
        # Missing project id: delete_instance must raise before performing any
        # lookup or delete on the Bigtable client.
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        delete_method = instance_method.return_value.delete
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.delete_instance(instance_id=CBT_INSTANCE)
        instance_exists_method.assert_not_called()
        instance_method.assert_not_called()
        delete_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        # With an explicit project_id, delete_instance must check existence,
        # call delete exactly once, and return None.
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_no_default_project_id.delete_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)
@mock.patch(
    'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
    new_callable=PropertyMock,
    return_value=None
)
@mock.patch('google.cloud.bigtable.instance.Instance.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_instance_missing_project_id(self, get_client, instance_create, mock_project_id):
    """create_instance without any project id must raise before contacting GCP."""
    operation = mock.Mock()
    # Fix: configure the mocked long-running operation so operation.result()
    # yields the Instance. The previous ``operation.result_return_value = ...``
    # only set an unused attribute and left result() unconfigured.
    operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
    instance_create.return_value = operation
    with self.assertRaises(AirflowException) as cm:
        self.bigtable_hook_no_default_project_id.create_instance(
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
    # The hook must bail out before creating a client or an instance.
    get_client.assert_not_called()
    instance_create.assert_not_called()
    err = cm.exception
    self.assertIn("The project id must be passed", str(err))
@mock.patch('google.cloud.bigtable.instance.Instance.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_instance_overridden_project_id(self, get_client, instance_create):
    """An explicit project_id lets create_instance run without a connection default."""
    operation = mock.Mock()
    # Fix: ``operation.result.return_value`` (not ``result_return_value``) is
    # what makes the mocked long-running operation return the Instance.
    operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
    instance_create.return_value = operation
    res = self.bigtable_hook_no_default_project_id.create_instance(
        project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        instance_id=CBT_INSTANCE,
        main_cluster_id=CBT_CLUSTER,
        main_cluster_zone=CBT_ZONE)
    get_client.assert_called_once_with(project_id='example-project')
    instance_create.assert_called_once_with(clusters=mock.ANY)
    self.assertEqual(res.instance_id, 'instance')
@mock.patch(
    'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
    new_callable=PropertyMock,
    return_value=None
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_table_missing_project_id(self, get_client, mock_project_id):
    """delete_table without any project id must fail before touching the client."""
    mock_instance = get_client.return_value.instance
    mock_exists = mock_instance.return_value.exists
    mock_table_delete = mock_instance.return_value.table.return_value.delete
    mock_exists.return_value = True
    with self.assertRaises(AirflowException) as ctx:
        self.bigtable_hook_no_default_project_id.delete_table(
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
    # No client interaction may have happened.
    get_client.assert_not_called()
    mock_exists.assert_not_called()
    mock_table_delete.assert_not_called()
    self.assertIn("The project id must be passed", str(ctx.exception))
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_table_overridden_project_id(self, get_client):
    """An explicit project_id makes delete_table use that project."""
    mock_instance = get_client.return_value.instance
    mock_instance.return_value.exists.return_value = True
    mock_table_delete = mock_instance.return_value.table.return_value.delete
    self.bigtable_hook_no_default_project_id.delete_table(
        project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        instance_id=CBT_INSTANCE,
        table_id=CBT_TABLE)
    get_client.assert_called_once_with(project_id='example-project')
    mock_instance.return_value.exists.assert_called_once_with()
    mock_table_delete.assert_called_once_with()
class TestBigtableHookDefaultProjectId(unittest.TestCase):
    """Tests for BigtableHook when the connection supplies a default project id.

    Operations fall back to the connection's project ('example-project')
    unless an explicit ``project_id`` argument overrides it.
    """

    def setUp(self):
        # Stub the base-hook constructor so no real GCP connection is required.
        with mock.patch('airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__',
                        new=mock_base_gcp_hook_default_project_id):
            self.bigtable_hook_default_project_id = BigtableHook(gcp_conn_id='test')

    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook.client_info", new_callable=mock.PropertyMock)
    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook._get_credentials")
    @mock.patch("airflow.gcp.hooks.bigtable.Client")
    def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        result = self.bigtable_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=mock_client_info.return_value,
            admin=True
        )
        self.assertEqual(mock_client.return_value, result)
        # The created client must also be cached on the hook.
        self.assertEqual(self.bigtable_hook_default_project_id._client, result)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_default_project_id.get_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNotNone(res)

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_default_project_id.get_instance(
            project_id='new-project',
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        # The explicit argument wins over the connection default.
        get_client.assert_called_once_with(project_id='new-project')
        self.assertIsNotNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_no_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        res = self.bigtable_hook_default_project_id.get_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        # A missing instance is reported as None, not as an error.
        self.assertIsNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_default_project_id.delete_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_default_project_id.delete_instance(
            project_id='new-project', instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='new-project')
        self.assertIsNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_no_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        delete_method = instance_method.return_value.delete
        self.bigtable_hook_default_project_id.delete_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        # Deleting a non-existent instance is a no-op.
        delete_method.assert_not_called()
        get_client.assert_called_once_with(project_id='example-project')

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance(self, get_client, instance_create, mock_project_id):
        operation = mock.Mock()
        # Fix: configure the mocked long-running operation so operation.result()
        # yields the Instance (the original ``result_return_value`` typo only
        # set an unused attribute).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_default_project_id.create_instance(
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')

    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance_overridden_project_id(self, get_client, instance_create):
        operation = mock.Mock()
        # Fix: ``operation.result.return_value`` (not ``result_return_value``).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_default_project_id.create_instance(
            project_id='new-project',
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='new-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_default_project_id.delete_table(
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_default_project_id.delete_table(
            project_id='new-project',
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='new-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_table(self, get_client, create):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.create_table(
            instance=instance,
            table_id=CBT_TABLE)
        # When an Instance object is supplied, no client lookup is needed.
        get_client.assert_not_called()
        create.assert_called_once_with([], {})

    @mock.patch('google.cloud.bigtable.cluster.Cluster.update')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_update_cluster(self, get_client, update):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.update_cluster(
            instance=instance,
            cluster_id=CBT_CLUSTER,
            nodes=4)
        get_client.assert_not_called()
        update.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.list_column_families')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_list_column_families(self, get_client, list_column_families):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        get_client.return_value = client
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.get_column_families_for_table(
            instance=instance, table_id=CBT_TABLE)
        get_client.assert_not_called()
        list_column_families.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.get_cluster_states')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_cluster_states(self, get_client, get_cluster_states):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.get_cluster_states_for_table(
            instance=instance, table_id=CBT_TABLE)
        get_client.assert_not_called()
        get_cluster_states.assert_called_once_with()
| 49.626126 | 102 | 0.745983 |
import unittest
import google
from google.cloud.bigtable import Client
from google.cloud.bigtable.instance import Instance
from tests.contrib.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id, \
mock_base_gcp_hook_default_project_id, GCP_PROJECT_ID_HOOK_UNIT_TEST
from tests.compat import mock, PropertyMock
from airflow import AirflowException
from airflow.gcp.hooks.bigtable import BigtableHook
# Fixture identifiers shared by all Bigtable hook tests below.
CBT_INSTANCE = 'instance'
CBT_CLUSTER = 'cluster'
CBT_ZONE = 'zone'
CBT_TABLE = 'table'
class TestBigtableHookNoDefaultProjectId(unittest.TestCase):
    """Tests for BigtableHook when the GCP connection has no default project id.

    Every operation must either receive an explicit ``project_id`` argument or
    raise AirflowException before any client call is made.
    """

    def setUp(self):
        # Stub the base-hook constructor so no real GCP connection is required.
        with mock.patch('airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__',
                        new=mock_base_gcp_hook_no_default_project_id):
            self.bigtable_hook_no_default_project_id = BigtableHook(gcp_conn_id='test')

    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook.client_info", new_callable=mock.PropertyMock)
    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook._get_credentials")
    @mock.patch("airflow.gcp.hooks.bigtable.Client")
    def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        result = self.bigtable_hook_no_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=mock_client_info.return_value,
            admin=True
        )
        self.assertEqual(mock_client.return_value, result)
        # The created client must also be cached on the hook.
        self.assertEqual(self.bigtable_hook_no_default_project_id._client, result)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_missing_project_id(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.get_instance(instance_id=CBT_INSTANCE)
        # The hook must bail out before touching the client.
        instance_exists_method.assert_not_called()
        instance_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_no_default_project_id.get_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNotNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_missing_project_id(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        delete_method = instance_method.return_value.delete
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.delete_instance(instance_id=CBT_INSTANCE)
        instance_exists_method.assert_not_called()
        instance_method.assert_not_called()
        delete_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_no_default_project_id.delete_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance_missing_project_id(self, get_client, instance_create, mock_project_id):
        operation = mock.Mock()
        # Fix: configure the mocked long-running operation so operation.result()
        # yields the Instance (the original ``result_return_value`` typo only
        # set an unused attribute).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.create_instance(
                instance_id=CBT_INSTANCE,
                main_cluster_id=CBT_CLUSTER,
                main_cluster_zone=CBT_ZONE)
        get_client.assert_not_called()
        instance_create.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))

    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance_overridden_project_id(self, get_client, instance_create):
        operation = mock.Mock()
        # Fix: ``operation.result.return_value`` (not ``result_return_value``).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_no_default_project_id.create_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table_missing_project_id(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.delete_table(
                instance_id=CBT_INSTANCE,
                table_id=CBT_TABLE)
        get_client.assert_not_called()
        instance_exists_method.assert_not_called()
        table_delete_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_no_default_project_id.delete_table(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()
class TestBigtableHookDefaultProjectId(unittest.TestCase):
    """Tests for BigtableHook when the connection supplies a default project id.

    Operations fall back to the connection's project ('example-project')
    unless an explicit ``project_id`` argument overrides it.
    """

    def setUp(self):
        # Stub the base-hook constructor so no real GCP connection is required.
        with mock.patch('airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__',
                        new=mock_base_gcp_hook_default_project_id):
            self.bigtable_hook_default_project_id = BigtableHook(gcp_conn_id='test')

    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook.client_info", new_callable=mock.PropertyMock)
    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook._get_credentials")
    @mock.patch("airflow.gcp.hooks.bigtable.Client")
    def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        result = self.bigtable_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=mock_client_info.return_value,
            admin=True
        )
        self.assertEqual(mock_client.return_value, result)
        # The created client must also be cached on the hook.
        self.assertEqual(self.bigtable_hook_default_project_id._client, result)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_default_project_id.get_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNotNone(res)

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_default_project_id.get_instance(
            project_id='new-project',
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        # The explicit argument wins over the connection default.
        get_client.assert_called_once_with(project_id='new-project')
        self.assertIsNotNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_no_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        res = self.bigtable_hook_default_project_id.get_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        # A missing instance is reported as None, not as an error.
        self.assertIsNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_default_project_id.delete_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_default_project_id.delete_instance(
            project_id='new-project', instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='new-project')
        self.assertIsNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_no_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        delete_method = instance_method.return_value.delete
        self.bigtable_hook_default_project_id.delete_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        # Deleting a non-existent instance is a no-op.
        delete_method.assert_not_called()
        get_client.assert_called_once_with(project_id='example-project')

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance(self, get_client, instance_create, mock_project_id):
        operation = mock.Mock()
        # Fix: configure the mocked long-running operation so operation.result()
        # yields the Instance (the original ``result_return_value`` typo only
        # set an unused attribute).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_default_project_id.create_instance(
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')

    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance_overridden_project_id(self, get_client, instance_create):
        operation = mock.Mock()
        # Fix: ``operation.result.return_value`` (not ``result_return_value``).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_default_project_id.create_instance(
            project_id='new-project',
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='new-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_default_project_id.delete_table(
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_default_project_id.delete_table(
            project_id='new-project',
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='new-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_table(self, get_client, create):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.create_table(
            instance=instance,
            table_id=CBT_TABLE)
        # When an Instance object is supplied, no client lookup is needed.
        get_client.assert_not_called()
        create.assert_called_once_with([], {})

    @mock.patch('google.cloud.bigtable.cluster.Cluster.update')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_update_cluster(self, get_client, update):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.update_cluster(
            instance=instance,
            cluster_id=CBT_CLUSTER,
            nodes=4)
        get_client.assert_not_called()
        update.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.list_column_families')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_list_column_families(self, get_client, list_column_families):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        get_client.return_value = client
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.get_column_families_for_table(
            instance=instance, table_id=CBT_TABLE)
        get_client.assert_not_called()
        list_column_families.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.get_cluster_states')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_cluster_states(self, get_client, get_cluster_states):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.get_cluster_states_for_table(
            instance=instance, table_id=CBT_TABLE)
        get_client.assert_not_called()
        get_cluster_states.assert_called_once_with()
f724f160afc41ed74cc89d83afb1c22e3d02f806 | 3,920 | py | Python | example.py | byu-dml/d3m-profiler | 9a3bc45061267091b0109f2159648785e370a18b | [
"MIT"
] | null | null | null | example.py | byu-dml/d3m-profiler | 9a3bc45061267091b0109f2159648785e370a18b | [
"MIT"
] | 5 | 2020-04-22T19:15:06.000Z | 2021-03-25T15:28:30.000Z | example.py | byu-dml/d3m-profiler | 9a3bc45061267091b0109f2159648785e370a18b | [
"MIT"
] | null | null | null | import numpy as np
import multiprocessing as mp
import pathlib as pl
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC as SupportVectorClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
# NOTE(review): duplicate SVC import (already imported above) -- harmless but redundant.
from sklearn.svm import SVC as SupportVectorClassifier
from d3m_profiler import rebalance, score_results
from d3m_profiler.evaluate_models import run_models, _save_results
from d3m_profiler.embed import embed
# Worker count for parallel helpers (currently unused below).
_NUM_THREADS = mp.cpu_count()
# Accumulates one metrics row per (data collection, classifier, balanced) run.
results = pd.DataFrame(columns=['data_collection', 'classifier', 'balanced', 'accuracy_score', 'f1_score_micro', 'f1_score_macro', 'f1_score_weighted'])
#closed_bal_file = 'data/closed_d3m_bal.csv'
#closed_unbal_file = 'data/closed_d3m_unbal.csv'
#open_bal_file = 'data/open_d3m_bal.csv'
#open_unbal_file = 'data/open_d3m_unbal.csv'
#files = [closed_unbal_file, closed_bal_file, open_unbal_file, open_bal_file]
type_column = 'colType'
model_weights_path = 'torontobooks_unigrams.bin'
open_d3m_file = 'data/open_d3m_data.csv'
closed_d3m_file = 'data/closed_d3m_data.csv'
files = [open_d3m_file]
#files = [open_d3m_file, closed_d3m_file]
#files = [closed_d3m_file, open_d3m_file]
for _file in files:
    # Collection name is the path segment after 'data/' -- assumes that
    # layout; TODO confirm for paths with a different depth.
    data_collection = _file.split('/')[1]
    print(data_collection)
    orig_df = pd.read_csv(_file)
    # Embedding works on text, so coerce every cell to str first.
    orig_df = orig_df.applymap(str)
    dfs = [embed(orig_df, type_column, model_weights_path)]
    # A collection counts as balanced when every class has the same count.
    class_counts = orig_df[type_column].value_counts().values
    balanced = len(set(class_counts)) == 1
    if (not balanced):
        # Also evaluate a SMOTE-rebalanced copy alongside the original.
        print('rebalancing {} data collection'.format(data_collection))
        rebal_df = rebalance.rebalance_SMOTE(orig_df, type_column, 'smote', model_weights_path)
        dfs.append(rebal_df)
    for df in dfs:
        class_counts = df[type_column].value_counts().values
        balanced = len(set(class_counts)) == 1
        print(balanced)
        xtrain, xtest, ytrain, ytest = None, None, None, None
        if (balanced):
            # Rebalanced frame: keep SMOTE-synthesized rows in training only,
            # so the held-out test split contains organic rows exclusively.
            X_syn = df[df['datasetName'].eq('SYNTHETIC')].drop(['datasetName', type_column], axis=1)
            y_syn = df[df['datasetName'].eq('SYNTHETIC')][type_column]
            X_organ = df[df['datasetName'] != 'SYNTHETIC'].drop(['datasetName', type_column], axis=1)
            y_organ = df[df['datasetName'] != 'SYNTHETIC'][type_column]
            xtrain, xtest, ytrain, ytest = train_test_split(X_organ, y_organ, test_size=0.33)
            # NOTE(review): DataFrame/Series.append is deprecated (removed in
            # pandas 2.0); would need pd.concat under newer pandas.
            xtrain = xtrain.append(X_syn)
            ytrain = ytrain.append(y_syn)
        else:
            X = df.drop(['datasetName', type_column], axis=1)
            y = df[type_column]
            dataset_names = df['datasetName']
            xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.33)
        #for model_class in [SupportVectorClassifier, RandomForestClassifier]:
        for model_class in [RandomForestClassifier]:
            classifier = model_class.__name__
            print('evaluating model: {}'.format(classifier))
            model = model_class()
            print('fitting model...')
            model.fit(xtrain, ytrain)
            if (balanced):
                # Persist the model trained on the balanced data for reuse.
                filename = 'RF_public_model.sav'
                pickle.dump(model, open(filename, 'wb'))
            yhat = model.predict(xtest)
            accuracy = accuracy_score(ytest, yhat)
            f1_micro = f1_score(ytest, yhat, average='micro')
            f1_macro = f1_score(ytest, yhat, average='macro')
            f1_weighted = f1_score(ytest, yhat, average='weighted')
            results = results.append({'data_collection': data_collection, 'classifier': classifier, 'balanced': balanced, 'accuracy_score': accuracy,
                                      'f1_score_micro': f1_micro, 'f1_score_macro': f1_macro, 'f1_score_weighted': f1_weighted}, ignore_index=True)
            print(results)
results.to_csv('data/results_2.csv', index=False)
| 36.981132 | 152 | 0.685969 | import numpy as np
import multiprocessing as mp
import pathlib as pl
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC as SupportVectorClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC as SupportVectorClassifier
from d3m_profiler import rebalance, score_results
from d3m_profiler.evaluate_models import run_models, _save_results
from d3m_profiler.embed import embed
_NUM_THREADS = mp.cpu_count()
results = pd.DataFrame(columns=['data_collection', 'classifier', 'balanced', 'accuracy_score', 'f1_score_micro', 'f1_score_macro', 'f1_score_weighted'])
type_column = 'colType'
model_weights_path = 'torontobooks_unigrams.bin'
open_d3m_file = 'data/open_d3m_data.csv'
closed_d3m_file = 'data/closed_d3m_data.csv'
files = [open_d3m_file]
for _file in files:
data_collection = _file.split('/')[1]
print(data_collection)
orig_df = pd.read_csv(_file)
orig_df = orig_df.applymap(str)
dfs = [embed(orig_df, type_column, model_weights_path)]
class_counts = orig_df[type_column].value_counts().values
balanced = len(set(class_counts)) == 1
if (not balanced):
print('rebalancing {} data collection'.format(data_collection))
rebal_df = rebalance.rebalance_SMOTE(orig_df, type_column, 'smote', model_weights_path)
dfs.append(rebal_df)
for df in dfs:
class_counts = df[type_column].value_counts().values
balanced = len(set(class_counts)) == 1
print(balanced)
xtrain, xtest, ytrain, ytest = None, None, None, None
if (balanced):
X_syn = df[df['datasetName'].eq('SYNTHETIC')].drop(['datasetName', type_column], axis=1)
y_syn = df[df['datasetName'].eq('SYNTHETIC')][type_column]
X_organ = df[df['datasetName'] != 'SYNTHETIC'].drop(['datasetName', type_column], axis=1)
y_organ = df[df['datasetName'] != 'SYNTHETIC'][type_column]
xtrain, xtest, ytrain, ytest = train_test_split(X_organ, y_organ, test_size=0.33)
xtrain = xtrain.append(X_syn)
ytrain = ytrain.append(y_syn)
else:
X = df.drop(['datasetName', type_column], axis=1)
y = df[type_column]
dataset_names = df['datasetName']
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.33)
for model_class in [RandomForestClassifier]:
classifier = model_class.__name__
print('evaluating model: {}'.format(classifier))
model = model_class()
print('fitting model...')
model.fit(xtrain, ytrain)
if (balanced):
filename = 'RF_public_model.sav'
pickle.dump(model, open(filename, 'wb'))
yhat = model.predict(xtest)
accuracy = accuracy_score(ytest, yhat)
f1_micro = f1_score(ytest, yhat, average='micro')
f1_macro = f1_score(ytest, yhat, average='macro')
f1_weighted = f1_score(ytest, yhat, average='weighted')
results = results.append({'data_collection': data_collection, 'classifier': classifier, 'balanced': balanced, 'accuracy_score': accuracy,
'f1_score_micro': f1_micro, 'f1_score_macro': f1_macro, 'f1_score_weighted': f1_weighted}, ignore_index=True)
print(results)
results.to_csv('data/results_2.csv', index=False)
| true | true |
f724f1b8cc56dd4a31f3d47d459ebef89ff7cdca | 21,725 | py | Python | mmdet/datasets/coco.py | YunongPan/swin_gui | 52adc917d3413781e76609d021c6a2579fdf44d1 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/coco.py | YunongPan/swin_gui | 52adc917d3413781e76609d021c6a2579fdf44d1 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/coco.py | YunongPan/swin_gui | 52adc917d3413781e76609d021c6a2579fdf44d1 | [
"Apache-2.0"
] | null | null | null | import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('schwarze_Schraube',)## check mark ##
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
raise AssertionError(
'Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporal directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.

        Dumps ``results`` to COCO-style json, runs ``COCOeval`` per requested
        metric, and collects the summarized numbers into an ordered dict.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.

        Returns:
            dict[str, float]: COCO style evaluation metric.

        Raises:
            KeyError: If an unsupported metric or metric item is requested,
                or a requested metric has no corresponding result file.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            # Standard COCO IoU sweep 0.50:0.05:0.95.
            iou_thrs = np.linspace(
                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        eval_results = OrderedDict()
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)
            if metric == 'proposal_fast':
                # Recall-only shortcut that bypasses the COCOeval machinery.
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                # loadRes raises IndexError on an empty result file.
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of metric name -> index into cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')
            if metric == 'proposal':
                # Class-agnostic evaluation: report recall-style items.
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]
                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
                    # Render the per-category APs as a multi-column table.
                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)
                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]
                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                # One-line summary convenient for copy/paste into reports.
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
| 40.683521 | 79 | 0.529758 | import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('schwarze_Schraube',)otations(self, ann_file):
if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
raise AssertionError(
'Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = OrderedDict()
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset is empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise:
precisions = cocoEval.eval['precision']
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
| true | true |
f724f1c71834d7b7a9d035b610d98d0f0773158a | 1,933 | py | Python | website/migrations/0005_auto_20191213_1623.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | 1 | 2021-12-19T01:05:26.000Z | 2021-12-19T01:05:26.000Z | website/migrations/0005_auto_20191213_1623.py | iQuISE/iquise-website | e6125fe938c549e020cd53a5aa718de101e972e9 | [
"MIT"
] | 16 | 2020-07-29T14:12:30.000Z | 2021-08-24T13:00:48.000Z | website/migrations/0005_auto_20191213_1623.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-12-13 21:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django schema migration (Django 1.11 series).
    # Introduces video-embedding support: EmbeddedVideo + EmbedEngine models,
    # a Presentation -> EmbeddedVideo link, and makes the presenter thumbnail
    # field non-editable.  Do not hand-edit operations after it has been applied.
    dependencies = [
        ('website', '0004_presenter_profile_image_thumb'),
    ]
    operations = [
        # A single video reference; which service it lives on is determined
        # by the EmbedEngine foreign key added below.
        migrations.CreateModel(
            name='EmbeddedVideo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('video_id', models.CharField(max_length=50)),
                ('public', models.BooleanField(default=False)),
            ],
        ),
        # A video hosting service plus the HTML snippet used to embed it.
        migrations.CreateModel(
            name='EmbedEngine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('html_template', models.TextField(help_text='Use {{ID}} which will get swapped in for the EmbeddedVideo.video_id.')),
                ('url_help', models.CharField(blank=True, help_text='Used to help the user figure out where the video_id is.', max_length=100)),
            ],
        ),
        # Thumbnails are generated, not user-supplied, so hide from forms.
        migrations.AlterField(
            model_name='presenter',
            name='profile_image_thumb',
            field=models.ImageField(blank=True, editable=False, upload_to='thumbs'),
        ),
        # PROTECT: an engine cannot be deleted while videos still use it.
        migrations.AddField(
            model_name='embeddedvideo',
            name='engine',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='website.EmbedEngine'),
        ),
        # SET_NULL: deleting a video leaves the presentation intact.
        migrations.AddField(
            model_name='presentation',
            name='video',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='website.EmbeddedVideo'),
        ),
    ]
| 39.44898 | 144 | 0.608381 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django schema migration (Django 1.11 series).
    # Introduces video-embedding support: EmbeddedVideo + EmbedEngine models,
    # a Presentation -> EmbeddedVideo link, and makes the presenter thumbnail
    # field non-editable.  Do not hand-edit operations after it has been applied.
    dependencies = [
        ('website', '0004_presenter_profile_image_thumb'),
    ]
    operations = [
        # A single video reference; which service it lives on is determined
        # by the EmbedEngine foreign key added below.
        migrations.CreateModel(
            name='EmbeddedVideo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('video_id', models.CharField(max_length=50)),
                ('public', models.BooleanField(default=False)),
            ],
        ),
        # A video hosting service plus the HTML snippet used to embed it.
        migrations.CreateModel(
            name='EmbedEngine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('html_template', models.TextField(help_text='Use {{ID}} which will get swapped in for the EmbeddedVideo.video_id.')),
                ('url_help', models.CharField(blank=True, help_text='Used to help the user figure out where the video_id is.', max_length=100)),
            ],
        ),
        # Thumbnails are generated, not user-supplied, so hide from forms.
        migrations.AlterField(
            model_name='presenter',
            name='profile_image_thumb',
            field=models.ImageField(blank=True, editable=False, upload_to='thumbs'),
        ),
        # PROTECT: an engine cannot be deleted while videos still use it.
        migrations.AddField(
            model_name='embeddedvideo',
            name='engine',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='website.EmbedEngine'),
        ),
        # SET_NULL: deleting a video leaves the presentation intact.
        migrations.AddField(
            model_name='presentation',
            name='video',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='website.EmbeddedVideo'),
        ),
    ]
| true | true |
f724f29c529e0e3435e1c89bd6dd7fdf76857abc | 14,833 | py | Python | scripts/tool_shed/migrate_tools_to_repositories.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 1 | 2019-11-03T11:45:43.000Z | 2019-11-03T11:45:43.000Z | scripts/tool_shed/migrate_tools_to_repositories.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 4 | 2017-05-24T19:36:34.000Z | 2019-08-23T02:49:18.000Z | scripts/tool_shed/migrate_tools_to_repositories.py | abretaud/galaxy | 1ad89511540e6800cd2d0da5d878c1c77d8ccfe9 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
'''
Migrate old Galaxy tool shed to next gen Galaxy tool shed. Specifically, the tool archives stored as
files in the old tool shed will be migrated to mercurial repositories in the next gen tool shed. This
script can be run any number of times as it initially eliminates any current repositories and db records
associated with them, and migrates old tool shed stuff to new tool shed stuff.
====== CRITICAL =======
0. This script must be run on a repo updated to changeset: 5621:4618be57481b
1. Before running this script, make sure the following config setting is set in tool_shed_wsgi.ini
# Enable next-gen tool shed features
enable_next_gen_tool_shed = True
2. This script requires the Galaxy instance to use Postgres for database storage.
To run this script, use "sh migrate_tools_to_repositories.sh" from this directory
'''
import ConfigParser
import os
import shutil
import sys
import tarfile
import tempfile
from time import strftime
from mercurial import hg, ui
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'lib')))
import galaxy.webapps.tool_shed.app
assert sys.version_info[:2] >= ( 2, 4 )
def directory_hash_id( id ):
    """Map a numeric id onto a list of 3-digit directory name components.

    Ids 0-999 all map to ["000"].  Larger ids are zero-padded on the left
    to a multiple of three digits, the trailing three digits are dropped
    (1000 files per leaf directory), and the remainder is split into
    3-character chunks.
    """
    id_str = str( id )
    # Shortcut -- ids 0-999 all live under ../000/
    if len( id_str ) < 4:
        return [ "000" ]
    # Left-pad with zeros to a multiple of three, then drop the last
    # three digits.
    padding = ( ( 3 - len( id_str ) ) % 3 ) * "0"
    chunked = ( padding + id_str )[:-3]
    # Break the remaining digits into chunks of three.
    return [ chunked[start:start + 3] for start in range( 0, len( chunked ), 3 ) ]
def get_versions( app, item ):
    """Get all versions of item whose state is a valid state"""
    # Only versions in one of these lifecycle states are worth migrating.
    valid_states = [ app.model.Tool.states.NEW,
                     app.model.Tool.states.WAITING,
                     app.model.Tool.states.APPROVED,
                     app.model.Tool.states.ARCHIVED ]
    versions = [ item ]
    this_item = item
    # Walk forward through the chain of newer versions (newer_version is
    # a scalar reference), appending each valid one after ``item``.
    while item.newer_version:
        if item.newer_version.state in valid_states:
            versions.append( item.newer_version )
        item = item.newer_version
    # Reset to the starting item and walk backward, prepending older
    # versions so the returned list stays ordered oldest -> newest.
    # NOTE(review): older_version is indexed as a one-element list here,
    # unlike the scalar newer_version -- presumably a backref collection;
    # confirm against the model definitions.
    item = this_item
    while item.older_version:
        if item.older_version[ 0 ].state in valid_states:
            versions.insert( 0, item.older_version[ 0 ] )
        item = item.older_version[ 0 ]
    return versions
def get_approved_tools( app, sa_session ):
    """Get only the latest version of each tool from the database whose state is approved"""
    approved_state = app.model.Tool.states.APPROVED
    name_ordered_tools = sa_session.query( app.model.Tool ) \
                                   .order_by( app.model.Tool.table.c.name )
    return [ tool for tool in name_ordered_tools if tool.state == approved_state ]
def create_repository_from_tool( app, sa_session, tool ):
    """Create a Repository db row and a backing hg repository for ``tool``,
    then migrate the tool's category and rating associations to it.

    Python 2 only (print statements).  Side effects: db inserts, directory
    creation under app.config.file_path, and an hgweb.config entry.
    """
    # Make the repository name a form of the tool's tool_id by
    # lower-casing everything and replacing any blank spaces with underscores.
    repo_name = tool.tool_id.lower().replace( ' ', '_' )
    print "Creating repository '%s' in database" % ( repo_name )
    repository = app.model.Repository( name=repo_name,
                                       description=tool.description,
                                       user_id=tool.user_id )
    # Flush to get the id
    sa_session.add( repository )
    sa_session.flush()
    # Determine the local repository's path on disk
    dir = os.path.join( app.config.file_path, *directory_hash_id( repository.id ) )
    # Create directory if it does not exist
    if not os.path.exists( dir ):
        os.makedirs( dir )
    # Define repository name inside hashed directory
    repository_path = os.path.join( dir, "repo_%d" % repository.id )
    # Create repository directory
    if not os.path.exists( repository_path ):
        os.makedirs( repository_path )
    # Create the local hg repository
    print "Creating repository '%s' on disk" % ( os.path.abspath( repository_path ) )
    hg.repository( ui.ui(), os.path.abspath( repository_path ), create=True )
    # Add an entry in the hgweb.config file for the new repository - this enables calls to repository.repo_path
    add_hgweb_config_entry( repository, repository_path )
    # Migrate tool categories
    for tca in tool.categories:
        category = tca.category
        print "Associating category '%s' with repository '%s' in database" % ( category.name, repository.name )
        rca = app.model.RepositoryCategoryAssociation( repository, category )
        sa_session.add( rca )
        sa_session.flush()
    # Migrate tool ratings
    print "Associating ratings for tool '%s' with repository '%s'" % ( tool.name, repository.name )
    for tra in tool.ratings:
        rra = app.model.RepositoryRatingAssociation( user=tra.user,
                                                     rating=tra.rating,
                                                     comment=tra.comment )
        rra.repository = repository
        sa_session.add( rra )
        sa_session.flush()
def add_hgweb_config_entry( repository, repository_path ):
    """Append an entry for a new repository to the hgweb.config file.

    This enables calls to repository.repo_path.  An entry looks like:
    repos/test/mira_assembler = database/community_files/000/repo_123
    If the file does not exist yet it is created with a [paths] header.
    """
    hgweb_config = "%s/hgweb.config" % os.getcwd()
    entry = "repos/%s/%s = %s" % ( repository.user.username, repository.name, repository_path.lstrip( './' ) )
    creating = not os.path.exists( hgweb_config )
    output = open( hgweb_config, 'w' if creating else 'a' )
    if creating:
        output.write( '[paths]\n' )
    output.write( "%s\n" % entry )
    output.close()
def create_hgrc_file( repository ):
    """Write the .hg/hgrc file for a freshly created repository.

    Upon repository creation only the owner can push ( allow_push ), and
    since both http and https are supported, push_ssl is set to False to
    override the mercurial api default (True).

    An entry for the repository must already exist in hgweb.config so
    that repository.repo_path resolves.
    """
    hgrc_file = os.path.abspath( os.path.join( repository.repo_path, ".hg", "hgrc" ) )
    settings = [ '[web]',
                 'allow_push = %s' % repository.user.username,
                 'name = %s' % repository.name,
                 'push_ssl = false' ]
    output = open( hgrc_file, 'w' )
    output.write( '\n'.join( settings ) + '\n' )
    output.flush()
    output.close()
def add_tool_files_to_repository( app, sa_session, tool ):
    """Extract every valid version of ``tool`` from its old archive file
    and commit/push it into the tool's new hg repository, one changeset
    per version (oldest first, per get_versions ordering).

    Python 2 only.  Relies on the ``hg`` command line via os.system and
    temporarily changes the process working directory.
    NOTE(review): repo paths/names are interpolated into shell commands
    unquoted -- safe only because repo_name is lower-cased with spaces
    replaced, but worth confirming for other metacharacters.
    """
    current_working_dir = os.getcwd()
    # Get the repository to which the tool will be migrated
    repo_name = tool.tool_id.lower().replace( ' ', '_' )
    repository = get_repository_by_name( app, sa_session, repo_name )
    repo_path = os.path.abspath( repository.repo_path )
    # Get all valid versions of the tool
    tool_versions = get_versions( app, tool )
    for tool_version in tool_versions:
        print "------------------------------"
        print "Migrating tool '%s' version '%s' from archive to repository '%s'" % ( tool_version.tool_id, tool_version.version, repo_path )
        # Make a temporary working directory
        tmp_dir = tempfile.mkdtemp()
        tmp_archive_dir = os.path.join( tmp_dir, 'tmp_archive_dir' )
        if not os.path.exists( tmp_archive_dir ):
            os.makedirs( tmp_archive_dir )
        cmd = "hg clone %s" % repo_path
        os.chdir( tmp_archive_dir )
        os.system( cmd )
        os.chdir( current_working_dir )
        cloned_repo_dir = os.path.join( tmp_archive_dir, 'repo_%d' % repository.id )
        # We want these change sets to be associated with the owner of the repository, so we'll
        # set the HGUSER environment variable accordingly. We do this because in the mercurial
        # api, the default username to be used in commits is determined in this order: $HGUSER,
        # [ui] section of hgrcs, $EMAIL and stop searching if one of these is set.
        os.environ[ 'HGUSER' ] = repository.user.username
        # Copy the tool archive to the tmp_archive_dir. The src file cannot be derived from
        # tool.file_name here because we have not loaded the Tool class in the model, so the
        # tool.file_name defaults to /tmp/...
        dir = os.path.join( app.config.file_path, 'tools', *directory_hash_id( tool_version.id ) )
        src = os.path.abspath( os.path.join( dir, 'tool_%d.dat' % tool_version.id ) )
        dst = os.path.join( tmp_archive_dir, tool_archive_file_name( tool_version, src ) )
        shutil.copy( src, dst )
        # Extract the archive to cloned_repo_dir
        tarfile.open( dst ).extractall( path=cloned_repo_dir )
        # Remove the archive
        os.remove( dst )
        # Change current working directory to the cloned repository
        os.chdir( cloned_repo_dir )
        for root, dirs, files in os.walk( cloned_repo_dir ):
            if '.hg' in dirs:
                # Don't visit .hg directories
                dirs.remove( '.hg' )
            if 'hgrc' in files:
                # Don't include hgrc files in commit - should be impossible
                # since we don't visit .hg dirs, but just in case...
                files.remove( 'hgrc' )
            for dir in dirs:
                os.system( "hg add %s" % dir )
            for name in files:
                print "Adding file '%s' to cloned repository at %s" % ( name, str( os.getcwd() ) )
                os.system( "hg add %s" % name )
        print "Committing change set to cloned repository at %s" % str( os.getcwd() )
        os.system( "hg commit -m 'Migrated tool version %s from old tool shed archive to new tool shed repository'" % tool_version.version )
        print "Pushing changeset from cloned repository '%s' to repository '%s'" % ( cloned_repo_dir, repo_path )
        cmd = "hg push %s" % repo_path
        print "cmd is: ", cmd
        os.system( cmd )
        # The tool shed includes a repository source file browser, which currently depends upon
        # copies of the hg repository file store in the repo_path for browsing. We'll do the
        # following to make these copies.
        os.chdir( repo_path )
        os.system( 'hg update' )
        # Change the current working directory to the original
        os.chdir( current_working_dir )
        # Now that we have out new repository made current with all change sets,
        # we'll create a hgrc file for it.
        create_hgrc_file( repository )
        # Remove tmp directory
        shutil.rmtree( tmp_dir )
def get_repository_by_name( app, sa_session, repo_name ):
    """Get a repository from the database (raises if not exactly one match)."""
    repository_query = sa_session.query( app.model.Repository )
    return repository_query.filter_by( name=repo_name ).one()
def contains( containing_str, contained_str ):
    """Return True if contained_str occurs in containing_str, ignoring case."""
    # Idiomatic membership test instead of .find(...) >= 0.
    return contained_str.lower() in containing_str.lower()
def tool_archive_extension( file_name ):
    """Sniff the archive type of file_name from its leading magic bytes.

    Returns 'tar.bz2', 'tar.gz', or 'tar' when neither magic matches.

    NOTE(review): the magic-number checks compare str literals against a
    header read in binary mode; this behaves as intended only under
    Python 2 (where str is bytes) -- under Python 3 both comparisons are
    always False and everything falls through to 'tar'.
    """
    extension = None
    if extension is None:
        head = open( file_name, 'rb' ).read( 4 )
        try:
            # bzip2 magic: 'BZh' followed by a compression-level digit.
            assert head[:3] == 'BZh'
            assert int( head[-1] ) in range( 0, 10 )
            extension = 'tar.bz2'
        except AssertionError:
            pass
    if extension is None:
        try:
            # gzip magic bytes (\x1f\x8b).
            assert head[:2] == '\037\213'
            extension = 'tar.gz'
        except:
            pass
    if extension is None:
        extension = 'tar'
    return extension
def tool_archive_file_name( tool, file_name ):
    """Build the canonical archive file name for a tool version,
    e.g. ``my_tool_1.0.0.tar.gz``."""
    extension = tool_archive_extension( file_name )
    return '%s_%s.%s' % ( tool.tool_id, tool.version, extension )
def main():
    """Entry point: wipe any existing next-gen repositories (db rows, on-disk
    hg stores and hgweb.config), then migrate every approved tool and its
    valid versions from old tool shed archives into hg repositories.

    Expects the tool shed config file path (tool_shed_wsgi.ini) as the single
    command-line argument.  Python 2 only (print statements).
    """
    if len( sys.argv ) < 2:
        print "Usage: python %s <Tool shed config file>" % sys.argv[0]
        sys.exit( 0 )
    now = strftime( "%Y-%m-%d %H:%M:%S" )
    print " "
    print "##########################################"
    print "%s - Migrating current tool archives to new tool repositories" % now
    # tool_shed_wsgi.ini file
    ini_file = sys.argv[1]
    conf_parser = ConfigParser.ConfigParser( {'here': os.getcwd()} )
    conf_parser.read( ini_file )
    try:
        db_conn_str = conf_parser.get( "app:main", "database_connection" )
    except ConfigParser.NoOptionError:
        # Fall back to the SQLite-style setting name.
        db_conn_str = conf_parser.get( "app:main", "database_file" )
    print 'DB Connection: ', db_conn_str
    # Instantiate app
    configuration = {}
    for key, value in conf_parser.items( "app:main" ):
        configuration[key] = value
    app = galaxy.webapps.tool_shed.app.UniverseApplication( global_conf=dict( __file__=ini_file ), **configuration )
    sa_session = app.model.context
    # Remove the hgweb.config file if it exists
    hgweb_config = "%s/hgweb.config" % os.getcwd()
    if os.path.exists( hgweb_config ):
        print "Removing old file: ", hgweb_config
        os.remove( hgweb_config )
    repo_records = 0
    rca_records = 0
    rra_records = 0
    for repo in sa_session.query( app.model.Repository ):
        # Remove the hg repository from disk. We have to be careful here, because old
        # tool files exist in app.config.file_path/tools and we don't want to delete them
        dir = os.path.join( app.config.file_path, *directory_hash_id( repo.id ) )
        if os.path.exists( dir ):
            print "Removing old repository file directory: ", dir
            shutil.rmtree( dir )
        # Delete all records from db tables:
        # repository_category_association, repository_rating_association, repository
        print "Deleting db records for repository: ", repo.name
        for rca in repo.categories:
            sa_session.delete( rca )
            rca_records += 1
        for rra in repo.ratings:
            sa_session.delete( rra )
            rra_records += 1
        sa_session.delete( repo )
        repo_records += 1
    sa_session.flush()
    print "Deleted %d rows from the repository table" % repo_records
    print "Deleted %d rows from the repository_category_association table" % rca_records
    print "Deleted %d rows from the repository_rating_association table" % rra_records
    # Migrate database tool, tool category and tool rating records to new
    # database repository, repository category and repository rating records
    # and create the hg repository on disk for each.
    for tool in get_approved_tools( app, sa_session ):
        create_repository_from_tool( app, sa_session, tool )
    # Add, commit and push all valid versions of each approved tool to the
    # associated hg repository.
    for tool in get_approved_tools( app, sa_session ):
        add_tool_files_to_repository( app, sa_session, tool )
    app.shutdown()
    print ' '
    print 'Migration to next gen tool shed complete...'
    print "##########################################"
    sys.exit(0)
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
| 43.884615 | 140 | 0.644239 |
'''
Migrate old Galaxy tool shed to next gen Galaxy tool shed. Specifically, the tool archives stored as
files in the old tool shed will be migrated to mercurial repositories in the next gen tool shed. This
script can be run any number of times as it initially eliminates any current repositories and db records
associated with them, and migrates old tool shed stuff to new tool shed stuff.
====== CRITICAL =======
0. This script must be run on a repo updated to changeset: 5621:4618be57481b
1. Before running this script, make sure the following config setting is set in tool_shed_wsgi.ini
# Enable next-gen tool shed features
enable_next_gen_tool_shed = True
2. This script requires the Galaxy instance to use Postgres for database storage.
To run this script, use "sh migrate_tools_to_repositories.sh" from this directory
'''
import ConfigParser
import os
import shutil
import sys
import tarfile
import tempfile
from time import strftime
from mercurial import hg, ui
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'lib')))
import galaxy.webapps.tool_shed.app
assert sys.version_info[:2] >= ( 2, 4 )
def directory_hash_id( id ):
    """Map a numeric id onto a list of 3-digit directory name components.

    Ids 0-999 all map to ["000"].  Larger ids are zero-padded on the left
    to a multiple of three digits, the trailing three digits are dropped
    (1000 files per leaf directory), and the remainder is split into
    3-character chunks.
    """
    id_str = str( id )
    # Shortcut -- ids 0-999 all live under ../000/
    if len( id_str ) < 4:
        return [ "000" ]
    # Left-pad with zeros to a multiple of three, then drop the last
    # three digits.
    padding = ( ( 3 - len( id_str ) ) % 3 ) * "0"
    chunked = ( padding + id_str )[:-3]
    # Break the remaining digits into chunks of three.
    return [ chunked[start:start + 3] for start in range( 0, len( chunked ), 3 ) ]
def get_versions( app, item ):
    """Get all versions of item whose state is a valid state"""
    # Only versions in one of these lifecycle states are worth migrating.
    valid_states = [ app.model.Tool.states.NEW,
                     app.model.Tool.states.WAITING,
                     app.model.Tool.states.APPROVED,
                     app.model.Tool.states.ARCHIVED ]
    versions = [ item ]
    this_item = item
    # Walk forward through the chain of newer versions (newer_version is
    # a scalar reference), appending each valid one after ``item``.
    while item.newer_version:
        if item.newer_version.state in valid_states:
            versions.append( item.newer_version )
        item = item.newer_version
    # Reset to the starting item and walk backward, prepending older
    # versions so the returned list stays ordered oldest -> newest.
    # NOTE(review): older_version is indexed as a one-element list here,
    # unlike the scalar newer_version -- presumably a backref collection;
    # confirm against the model definitions.
    item = this_item
    while item.older_version:
        if item.older_version[ 0 ].state in valid_states:
            versions.insert( 0, item.older_version[ 0 ] )
        item = item.older_version[ 0 ]
    return versions
def get_approved_tools( app, sa_session ):
    """Get only the latest version of each tool from the database whose state is approved"""
    approved_state = app.model.Tool.states.APPROVED
    name_ordered_tools = sa_session.query( app.model.Tool ) \
                                   .order_by( app.model.Tool.table.c.name )
    return [ tool for tool in name_ordered_tools if tool.state == approved_state ]
def create_repository_from_tool( app, sa_session, tool ):
    """Create a Repository db row and a backing hg repository for ``tool``,
    then migrate the tool's category and rating associations to it.

    Python 2 only (print statements).  Side effects: db inserts, directory
    creation under app.config.file_path, and an hgweb.config entry.
    """
    # Make the repository name a form of the tool's tool_id by
    # lower-casing everything and replacing any blank spaces with underscores.
    repo_name = tool.tool_id.lower().replace( ' ', '_' )
    print "Creating repository '%s' in database" % ( repo_name )
    repository = app.model.Repository( name=repo_name,
                                       description=tool.description,
                                       user_id=tool.user_id )
    # Flush to get the id
    sa_session.add( repository )
    sa_session.flush()
    # Determine the local repository's path on disk
    dir = os.path.join( app.config.file_path, *directory_hash_id( repository.id ) )
    # Create the hashed directory if it does not exist.
    if not os.path.exists( dir ):
        os.makedirs( dir )
    # Repository directory name inside the hashed directory.
    repository_path = os.path.join( dir, "repo_%d" % repository.id )
    if not os.path.exists( repository_path ):
        os.makedirs( repository_path )
    # Create the local hg repository on disk.
    print "Creating repository '%s' on disk" % ( os.path.abspath( repository_path ) )
    hg.repository( ui.ui(), os.path.abspath( repository_path ), create=True )
    # Register the repository in hgweb.config so repository.repo_path works.
    add_hgweb_config_entry( repository, repository_path )
    # Migrate tool categories.
    for tca in tool.categories:
        category = tca.category
        print "Associating category '%s' with repository '%s' in database" % ( category.name, repository.name )
        rca = app.model.RepositoryCategoryAssociation( repository, category )
        sa_session.add( rca )
        sa_session.flush()
    # Migrate tool ratings.
    print "Associating ratings for tool '%s' with repository '%s'" % ( tool.name, repository.name )
    for tra in tool.ratings:
        rra = app.model.RepositoryRatingAssociation( user=tra.user,
                                                     rating=tra.rating,
                                                     comment=tra.comment )
        rra.repository = repository
        sa_session.add( rra )
        sa_session.flush()
def add_hgweb_config_entry( repository, repository_path ):
    """Append an entry for a new repository to the hgweb.config file.

    This enables calls to repository.repo_path.  An entry looks like:
    repos/test/mira_assembler = database/community_files/000/repo_123
    If the file does not exist yet it is created with a [paths] header.
    """
    hgweb_config = "%s/hgweb.config" % os.getcwd()
    entry = "repos/%s/%s = %s" % ( repository.user.username, repository.name, repository_path.lstrip( './' ) )
    creating = not os.path.exists( hgweb_config )
    output = open( hgweb_config, 'w' if creating else 'a' )
    if creating:
        output.write( '[paths]\n' )
    output.write( "%s\n" % entry )
    output.close()
def create_hgrc_file( repository ):
    """Write the .hg/hgrc file for a freshly created repository.

    Upon repository creation only the owner can push ( allow_push ), and
    since both http and https are supported, push_ssl is set to False to
    override the mercurial api default (True).

    An entry for the repository must already exist in hgweb.config so
    that repository.repo_path resolves.
    """
    hgrc_file = os.path.abspath( os.path.join( repository.repo_path, ".hg", "hgrc" ) )
    settings = [ '[web]',
                 'allow_push = %s' % repository.user.username,
                 'name = %s' % repository.name,
                 'push_ssl = false' ]
    output = open( hgrc_file, 'w' )
    output.write( '\n'.join( settings ) + '\n' )
    output.flush()
    output.close()
def add_tool_files_to_repository( app, sa_session, tool ):
    """Extract every valid version of ``tool`` from its old archive file
    and commit/push it into the tool's new hg repository, one changeset
    per version (oldest first, per get_versions ordering).

    Python 2 only.  Relies on the ``hg`` command line via os.system and
    temporarily changes the process working directory.
    NOTE(review): repo paths/names are interpolated into shell commands
    unquoted -- safe only because repo_name is lower-cased with spaces
    replaced, but worth confirming for other metacharacters.
    """
    current_working_dir = os.getcwd()
    # Get the repository to which the tool will be migrated.
    repo_name = tool.tool_id.lower().replace( ' ', '_' )
    repository = get_repository_by_name( app, sa_session, repo_name )
    repo_path = os.path.abspath( repository.repo_path )
    # Get all valid versions of the tool.
    tool_versions = get_versions( app, tool )
    for tool_version in tool_versions:
        print "------------------------------"
        print "Migrating tool '%s' version '%s' from archive to repository '%s'" % ( tool_version.tool_id, tool_version.version, repo_path )
        # Clone the repository into a temporary working directory.
        tmp_dir = tempfile.mkdtemp()
        tmp_archive_dir = os.path.join( tmp_dir, 'tmp_archive_dir' )
        if not os.path.exists( tmp_archive_dir ):
            os.makedirs( tmp_archive_dir )
        cmd = "hg clone %s" % repo_path
        os.chdir( tmp_archive_dir )
        os.system( cmd )
        os.chdir( current_working_dir )
        cloned_repo_dir = os.path.join( tmp_archive_dir, 'repo_%d' % repository.id )
        # We want these change sets to be associated with the owner of the repository, so we'll
        # set the HGUSER environment variable accordingly. We do this because in the mercurial
        # api, the default username to be used in commits is determined in this order: $HGUSER,
        # [ui] section of hgrcs, $EMAIL and stop searching if one of these is set.
        os.environ[ 'HGUSER' ] = repository.user.username
        # Copy the tool archive to the tmp_archive_dir; the src path is
        # rebuilt from the hashed id because tool.file_name is unreliable here.
        dir = os.path.join( app.config.file_path, 'tools', *directory_hash_id( tool_version.id ) )
        src = os.path.abspath( os.path.join( dir, 'tool_%d.dat' % tool_version.id ) )
        dst = os.path.join( tmp_archive_dir, tool_archive_file_name( tool_version, src ) )
        shutil.copy( src, dst )
        # Extract the archive into the clone, then discard the archive copy.
        tarfile.open( dst ).extractall( path=cloned_repo_dir )
        os.remove( dst )
        # Work from inside the cloned repository.
        os.chdir( cloned_repo_dir )
        for root, dirs, files in os.walk( cloned_repo_dir ):
            if '.hg' in dirs:
                # Don't visit .hg directories.
                dirs.remove( '.hg' )
            if 'hgrc' in files:
                # Don't include hgrc files in the commit - should be impossible
                # since we don't visit .hg dirs, but just in case...
                files.remove( 'hgrc' )
            for dir in dirs:
                os.system( "hg add %s" % dir )
            for name in files:
                print "Adding file '%s' to cloned repository at %s" % ( name, str( os.getcwd() ) )
                os.system( "hg add %s" % name )
        print "Committing change set to cloned repository at %s" % str( os.getcwd() )
        os.system( "hg commit -m 'Migrated tool version %s from old tool shed archive to new tool shed repository'" % tool_version.version )
        print "Pushing changeset from cloned repository '%s' to repository '%s'" % ( cloned_repo_dir, repo_path )
        cmd = "hg push %s" % repo_path
        print "cmd is: ", cmd
        os.system( cmd )
        # Update the working copy at repo_path so the tool shed's source
        # browser (which reads files from repo_path) sees the new content.
        os.chdir( repo_path )
        os.system( 'hg update' )
        # Change the current working directory back to the original.
        os.chdir( current_working_dir )
        # With the repository current with all change sets, write its hgrc.
        create_hgrc_file( repository )
        # Remove the temporary directory.
        shutil.rmtree( tmp_dir )
def get_repository_by_name( app, sa_session, repo_name ):
    """Get a repository from the database (raises if not exactly one match)."""
    repository_query = sa_session.query( app.model.Repository )
    return repository_query.filter_by( name=repo_name ).one()
def contains( containing_str, contained_str ):
    """Return True if contained_str occurs in containing_str, ignoring case."""
    # Idiomatic membership test instead of .find(...) >= 0.
    return contained_str.lower() in containing_str.lower()
def tool_archive_extension( file_name ):
    """Sniff the archive type of file_name from its leading magic bytes.

    Returns 'tar.bz2', 'tar.gz', or 'tar' when neither magic matches.

    NOTE(review): the magic-number checks compare str literals against a
    header read in binary mode; this behaves as intended only under
    Python 2 (where str is bytes) -- under Python 3 both comparisons are
    always False and everything falls through to 'tar'.
    """
    extension = None
    if extension is None:
        head = open( file_name, 'rb' ).read( 4 )
        try:
            # bzip2 magic: 'BZh' followed by a compression-level digit.
            assert head[:3] == 'BZh'
            assert int( head[-1] ) in range( 0, 10 )
            extension = 'tar.bz2'
        except AssertionError:
            pass
    if extension is None:
        try:
            # gzip magic bytes (\x1f\x8b).
            assert head[:2] == '\037\213'
            extension = 'tar.gz'
        except:
            pass
    if extension is None:
        extension = 'tar'
    return extension
def tool_archive_file_name( tool, file_name ):
    """Build the canonical archive file name for a tool version,
    e.g. ``my_tool_1.0.0.tar.gz``."""
    extension = tool_archive_extension( file_name )
    return '%s_%s.%s' % ( tool.tool_id, tool.version, extension )
def main():
    """Entry point: wipe any existing next-gen repositories (db rows, on-disk
    hg stores and hgweb.config), then migrate every approved tool and its
    valid versions from old tool shed archives into hg repositories.

    Expects the tool shed config file path (tool_shed_wsgi.ini) as the single
    command-line argument.  Python 2 only (print statements).
    """
    if len( sys.argv ) < 2:
        print "Usage: python %s <Tool shed config file>" % sys.argv[0]
        sys.exit( 0 )
    now = strftime( "%Y-%m-%d %H:%M:%S" )
    print " "
    print "##########################################"
    print "%s - Migrating current tool archives to new tool repositories" % now
    # tool_shed_wsgi.ini file path.
    ini_file = sys.argv[1]
    conf_parser = ConfigParser.ConfigParser( {'here': os.getcwd()} )
    conf_parser.read( ini_file )
    try:
        db_conn_str = conf_parser.get( "app:main", "database_connection" )
    except ConfigParser.NoOptionError:
        # Fall back to the SQLite-style setting name.
        db_conn_str = conf_parser.get( "app:main", "database_file" )
    print 'DB Connection: ', db_conn_str
    # Instantiate the tool shed application from the [app:main] section.
    configuration = {}
    for key, value in conf_parser.items( "app:main" ):
        configuration[key] = value
    app = galaxy.webapps.tool_shed.app.UniverseApplication( global_conf=dict( __file__=ini_file ), **configuration )
    sa_session = app.model.context
    # Remove the hgweb.config file if it exists; it is rebuilt during migration.
    hgweb_config = "%s/hgweb.config" % os.getcwd()
    if os.path.exists( hgweb_config ):
        print "Removing old file: ", hgweb_config
        os.remove( hgweb_config )
    repo_records = 0
    rca_records = 0
    rra_records = 0
    for repo in sa_session.query( app.model.Repository ):
        # Remove the hg repository from disk (the hashed repo directory only;
        # old tool archives under app.config.file_path/tools must survive).
        dir = os.path.join( app.config.file_path, *directory_hash_id( repo.id ) )
        if os.path.exists( dir ):
            print "Removing old repository file directory: ", dir
            shutil.rmtree( dir )
        # Delete all records from db tables:
        # repository_category_association, repository_rating_association, repository
        print "Deleting db records for repository: ", repo.name
        for rca in repo.categories:
            sa_session.delete( rca )
            rca_records += 1
        for rra in repo.ratings:
            sa_session.delete( rra )
            rra_records += 1
        sa_session.delete( repo )
        repo_records += 1
    sa_session.flush()
    print "Deleted %d rows from the repository table" % repo_records
    print "Deleted %d rows from the repository_category_association table" % rca_records
    print "Deleted %d rows from the repository_rating_association table" % rra_records
    # Migrate database tool, tool category and tool rating records to new
    # database repository, repository category and repository rating records
    # and create the hg repository on disk for each.
    for tool in get_approved_tools( app, sa_session ):
        create_repository_from_tool( app, sa_session, tool )
    # Add, commit and push all valid versions of each approved tool to the
    # associated hg repository.
    for tool in get_approved_tools( app, sa_session ):
        add_tool_files_to_repository( app, sa_session, tool )
    app.shutdown()
    print ' '
    print 'Migration to next gen tool shed complete...'
    print "##########################################"
    sys.exit(0)
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
| false | true |
f724f2e2d80fad9431aae8674677cf6022972166 | 7,735 | py | Python | telepresence/proxy/remote.py | Nesurion/telepresence | cfe60eb91b42345ff890b7726c6388e923bc441a | [
"Apache-2.0"
] | null | null | null | telepresence/proxy/remote.py | Nesurion/telepresence | cfe60eb91b42345ff890b7726c6388e923bc441a | [
"Apache-2.0"
] | null | null | null | telepresence/proxy/remote.py | Nesurion/telepresence | cfe60eb91b42345ff890b7726c6388e923bc441a | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from subprocess import STDOUT, CalledProcessError
from typing import Dict, Optional
from telepresence import image_version
from telepresence.runner import Runner
class RemoteInfo(object):
    """
    Information about the remote setup.

    :ivar namespace str: The Kubernetes namespace.
    :ivar context str: The Kubernetes context.
    :ivar deployment_name str: The name of the Deployment object.
    :ivar pod_name str: The name of the pod created by the Deployment.
    :ivar deployment_config dict: The decoded k8s object (i.e. JSON/YAML).
    :ivar container_config dict: The container within the Deployment JSON.
    :ivar container_name str: The name of the container.
    """

    def __init__(
        self,
        runner: "Runner",  # unused here; kept for interface compatibility
        deployment_name: str,
        pod_name: str,
        deployment_config: dict,
    ) -> None:
        self.deployment_name = deployment_name
        self.pod_name = pod_name
        self.deployment_config = deployment_config
        cs = deployment_config["spec"]["template"]["spec"]["containers"]
        # Prefer the telepresence-k8s image; fall back to telepresence-proxy.
        containers = [c for c in cs if "telepresence-k8s" in c["image"]]
        if not containers:
            containers = [c for c in cs if "telepresence-proxy" in c["image"]]
        if not containers:
            raise RuntimeError(
                "Could not find container with image "
                "'datawire/telepresence-k8s' in pod {}.".format(pod_name)
            )
        self.container_config = containers[0]  # type: Dict
        self.container_name = self.container_config["name"]  # type: str

    def remote_telepresence_version(self) -> str:
        """Return the version used by the remote Telepresence container."""
        # Split on the LAST colon so registry hosts that include a port
        # (e.g. "reg.example.com:5000/datawire/telepresence-k8s:1.0") do
        # not break the tag extraction (plain split(":") yields 3 parts
        # there and the 2-tuple unpack raises ValueError).
        name, version = self.container_config["image"].rsplit(":", 1)
        if name.endswith("telepresence-proxy"):
            # The proxy image is always deployed at our own version.
            return image_version
        return version
def get_deployment_json(
    runner: Runner,
    deployment_name: str,
    deployment_type: str,
    run_id: Optional[str] = None,
) -> Dict:
    """Get the decoded JSON for a deployment.

    If this is a Deployment we created, the run_id is also passed in - this is
    the session id we set for the telepresence label. Otherwise run_id is None
    and the Deployment name must be used to locate the Deployment.
    """
    span = runner.span()
    try:
        # Base kubectl arguments shared by both lookup strategies.
        # NOTE(review): "kubectl get --export" was deprecated/removed in
        # newer kubectl releases -- confirm which versions are supported.
        get_deployment = [
            "get",
            deployment_type,
            "-o",
            "json",
            "--export",
        ]
        if run_id is None:
            # Look the deployment up directly by name.
            return json.loads(
                runner.get_output(
                    runner.kubectl(get_deployment + [deployment_name]),
                    stderr=STDOUT
                )
            )
        else:
            # When using a selector we get a list of objects, not just one:
            return json.loads(
                runner.get_output(
                    runner.kubectl(
                        get_deployment + ["--selector=telepresence=" + run_id]
                    ),
                    stderr=STDOUT
                )
            )["items"][0]
    except CalledProcessError as e:
        # Surface kubectl's captured output (stderr was merged via STDOUT
        # above) in the failure message.
        raise runner.fail(
            "Failed to find deployment {}:\n{}".format(
                deployment_name, e.stdout
            )
        )
    finally:
        span.end()
def wait_for_pod(runner: "Runner", remote_info: "RemoteInfo") -> None:
    """Wait for the pod to start running.

    Polls ``kubectl get pod <pod_name>`` via the runner for up to 120
    seconds at 0.25s intervals, returning once the pod phase is Running
    and the telepresence container reports ready.

    :raises RuntimeError: if the pod never becomes ready in time.
    """
    span = runner.span()
    pod = None  # last successfully fetched pod object, for error reporting
    for _ in runner.loop_until(120, 0.25):
        try:
            pod = json.loads(
                runner.get_output(
                    runner.kubectl(
                        "get", "pod", remote_info.pod_name, "-o", "json"
                    )
                )
            )
        except CalledProcessError:
            # kubectl can fail transiently (e.g. pod not created yet); retry.
            continue
        if pod["status"]["phase"] == "Running":
            for container in pod["status"]["containerStatuses"]:
                if container["name"] == remote_info.container_name and (
                    container["ready"]
                ):
                    span.end()
                    return
    span.end()
    # Bug fix: the original raised with pod["status"] even when every poll
    # failed, so ``pod`` could be unbound and the real error was masked by
    # a NameError.  Report that case explicitly instead.
    status = pod["status"] if pod is not None else "(pod was never retrieved)"
    raise RuntimeError(
        "Pod isn't starting or can't be found: {}".format(status)
    )
def get_remote_info(
    runner: Runner,
    deployment_name: str,
    deployment_type: str,
    timeout: float,
    run_id: Optional[str] = None,
) -> RemoteInfo:
    """
    Given the deployment name, return a RemoteInfo object.
    If this is a Deployment we created, the run_id is also passed in - this is
    the session identifier we set for the telepresence label. Otherwise run_id
    is None and the Deployment name must be used to locate the Deployment.
    Raises RuntimeError when no matching pod appears within `timeout` seconds.
    """
    span = runner.span()
    deployment = get_deployment_json(
        runner, deployment_name, deployment_type, run_id=run_id
    )
    # Labels from the deployment's pod template; a candidate pod must carry
    # at least these labels to be considered a match.
    dst_metadata = deployment["spec"]["template"]["metadata"]
    expected_labels = dst_metadata.get("labels", {})
    runner.write("Searching for Telepresence pod:")
    runner.write(" with name {}-*".format(deployment_name))
    runner.write(" with labels {}".format(expected_labels))
    # Narrow the pod listing by the telepresence session label when we have one.
    cmd = "get pod -o json --export".split()
    if run_id:
        cmd.append("--selector=telepresence={}".format(run_id))
    # Poll once per second until timeout for a pod that matches by name prefix,
    # phase, and labels.
    for _ in runner.loop_until(timeout, 1):
        pods = json.loads(runner.get_output(runner.kubectl(cmd)))["items"]
        for pod in pods:
            name = pod["metadata"]["name"]
            phase = pod["status"]["phase"]
            labels = pod["metadata"].get("labels", {})
            runner.write("Checking {}".format(name))
            if not name.startswith(deployment_name + "-"):
                runner.write("--> Name does not match")
                continue
            if phase not in ("Pending", "Running"):
                runner.write("--> Wrong phase: {}".format(phase))
                continue
            if not set(expected_labels.items()).issubset(set(labels.items())):
                runner.write("--> Labels don't match: {}".format(labels))
                continue
            runner.write("Looks like we've found our pod!\n")
            remote_info = RemoteInfo(
                runner,
                deployment_name,
                name,
                deployment,
            )
            # Ensure remote container is running same version as we are:
            remote_version = remote_info.remote_telepresence_version()
            if remote_version != image_version:
                runner.write("Pod is running Tel {}".format(remote_version))
                raise runner.fail((
                    "The remote datawire/telepresence-k8s container is " +
                    "running version {}, but this tool is version {}. " +
                    "Please make sure both are running the same version."
                ).format(remote_version, image_version))
            # Wait for pod to be running:
            wait_for_pod(runner, remote_info)
            span.end()
            return remote_info
    # Didn't find pod...
    span.end()
    raise RuntimeError(
        "Telepresence pod not found for Deployment '{}'.".
        format(deployment_name)
    )
| 35.645161 | 78 | 0.587589 |
import json
from subprocess import STDOUT, CalledProcessError
from typing import Dict, Optional
from telepresence import image_version
from telepresence.runner import Runner
class RemoteInfo(object):
def __init__(
self,
runner: Runner,
deployment_name: str,
pod_name: str,
deployment_config: dict,
) -> None:
self.deployment_name = deployment_name
self.pod_name = pod_name
self.deployment_config = deployment_config
cs = deployment_config["spec"]["template"]["spec"]["containers"]
containers = [c for c in cs if "telepresence-k8s" in c["image"]]
if not containers:
containers = [c for c in cs if "telepresence-proxy" in c["image"]]
if not containers:
raise RuntimeError(
"Could not find container with image "
"'datawire/telepresence-k8s' in pod {}.".format(pod_name)
)
self.container_config = containers[0]
self.container_name = self.container_config["name"]
def remote_telepresence_version(self) -> str:
name, version = self.container_config["image"].split(":")
if name.endswith("telepresence-proxy"):
return image_version
return version
def get_deployment_json(
runner: Runner,
deployment_name: str,
deployment_type: str,
run_id: Optional[str] = None,
) -> Dict:
span = runner.span()
try:
get_deployment = [
"get",
deployment_type,
"-o",
"json",
"--export",
]
if run_id is None:
return json.loads(
runner.get_output(
runner.kubectl(get_deployment + [deployment_name]),
stderr=STDOUT
)
)
else:
return json.loads(
runner.get_output(
runner.kubectl(
get_deployment + ["--selector=telepresence=" + run_id]
),
stderr=STDOUT
)
)["items"][0]
except CalledProcessError as e:
raise runner.fail(
"Failed to find deployment {}:\n{}".format(
deployment_name, e.stdout
)
)
finally:
span.end()
def wait_for_pod(runner: Runner, remote_info: RemoteInfo) -> None:
span = runner.span()
for _ in runner.loop_until(120, 0.25):
try:
pod = json.loads(
runner.get_output(
runner.kubectl(
"get", "pod", remote_info.pod_name, "-o", "json"
)
)
)
except CalledProcessError:
continue
if pod["status"]["phase"] == "Running":
for container in pod["status"]["containerStatuses"]:
if container["name"] == remote_info.container_name and (
container["ready"]
):
span.end()
return
span.end()
raise RuntimeError(
"Pod isn't starting or can't be found: {}".format(pod["status"])
)
def get_remote_info(
runner: Runner,
deployment_name: str,
deployment_type: str,
timeout: float,
run_id: Optional[str] = None,
) -> RemoteInfo:
span = runner.span()
deployment = get_deployment_json(
runner, deployment_name, deployment_type, run_id=run_id
)
dst_metadata = deployment["spec"]["template"]["metadata"]
expected_labels = dst_metadata.get("labels", {})
runner.write("Searching for Telepresence pod:")
runner.write(" with name {}-*".format(deployment_name))
runner.write(" with labels {}".format(expected_labels))
cmd = "get pod -o json --export".split()
if run_id:
cmd.append("--selector=telepresence={}".format(run_id))
for _ in runner.loop_until(timeout, 1):
pods = json.loads(runner.get_output(runner.kubectl(cmd)))["items"]
for pod in pods:
name = pod["metadata"]["name"]
phase = pod["status"]["phase"]
labels = pod["metadata"].get("labels", {})
runner.write("Checking {}".format(name))
if not name.startswith(deployment_name + "-"):
runner.write("--> Name does not match")
continue
if phase not in ("Pending", "Running"):
runner.write("--> Wrong phase: {}".format(phase))
continue
if not set(expected_labels.items()).issubset(set(labels.items())):
runner.write("--> Labels don't match: {}".format(labels))
continue
runner.write("Looks like we've found our pod!\n")
remote_info = RemoteInfo(
runner,
deployment_name,
name,
deployment,
)
remote_version = remote_info.remote_telepresence_version()
if remote_version != image_version:
runner.write("Pod is running Tel {}".format(remote_version))
raise runner.fail((
"The remote datawire/telepresence-k8s container is " +
"running version {}, but this tool is version {}. " +
"Please make sure both are running the same version."
).format(remote_version, image_version))
wait_for_pod(runner, remote_info)
span.end()
return remote_info
span.end()
raise RuntimeError(
"Telepresence pod not found for Deployment '{}'.".
format(deployment_name)
)
| true | true |
f724f33ecccdbc81d47a05655141217459e84376 | 3,882 | py | Python | src/simulate.py | ElanVB/noisy_signal_prop | 3ad81e15f02a92b3a669c9b81c8b2f12f331a1b6 | [
"MIT"
] | 5 | 2018-10-31T08:55:37.000Z | 2020-01-14T08:18:22.000Z | src/simulate.py | ElanVB/noisy_signal_prop | 3ad81e15f02a92b3a669c9b81c8b2f12f331a1b6 | [
"MIT"
] | null | null | null | src/simulate.py | ElanVB/noisy_signal_prop | 3ad81e15f02a92b3a669c9b81c8b2f12f331a1b6 | [
"MIT"
] | null | null | null |
# imports
import numpy as np
import os, sys, pickle
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(file_dir)
# custom import
from theory import depth
from viz import get_colours
from numpy_simulation import *
from utils import load_experiment
from theory import critical_point
def perform_experiment(experiments):
    """Run a noisy-signal-propagation simulation for every scenario.

    Each scenario dict must provide 'dist', 'noise', 'act' and 'init'.
    The enumeration index is reused as the simulation seed.
    """
    for seed, scenario in enumerate(experiments):
        noisy_signal_prop_simulations(
            scenario['dist'],
            scenario['noise'],
            scenario['act'],
            scenario['init'],
            seed=seed,
        )
def variance():
    """Simulate variance propagation for two noise models at three inits."""
    noise_settings = [
        ("bern", ('prob_1', 0.6)),
        ("mult gauss", ('std', 0.25)),
    ]
    init_schemes = ["underflow", "overflow", "crit"]
    # Outer loop over noise model, inner over init, matching the original
    # experiment ordering (and therefore the seeds assigned per scenario).
    experiments = [
        {"dist": dist, "noise": noise, "act": "relu", "init": init}
        for dist, noise in noise_settings
        for init in init_schemes
    ]
    perform_experiment(experiments)
def correlation():
    """Simulate correlation dynamics at criticality for several noise levels."""
    cases = [
        ("none", None, None),
        ("bern", 'prob_1', 0.6),
        ("bern", 'prob_1', 0.8),
        ("mult gauss", 'std', 0.25),
        ("mult gauss", 'std', 2),
    ]
    experiments = [
        {"dist": dist, "noise": (key, value), "act": "relu", "init": "crit"}
        for dist, key, value in cases
    ]
    perform_experiment(experiments)
def fixed_point():
    """Simulate convergence to the fixed point over a grid of noise levels."""
    # Explicit value lists (rather than arithmetic ranges) so the floats
    # match the original literals exactly.
    bern_probs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    gauss_stds = [
        0.1, 0.25, 0.4, 0.55, 0.7, 0.85, 1.0,
        1.15, 1.3, 1.45, 1.6, 1.75, 1.9,
    ]
    experiments = [
        {"dist": "bern", "noise": ('prob_1', p), "act": "relu", "init": "crit"}
        for p in bern_probs
    ] + [
        {"dist": "mult gauss", "noise": ('std', s), "act": "relu", "init": "crit"}
        for s in gauss_stds
    ]
    perform_experiment(experiments)
# Script entry point: runs the fixed-point experiment suite.
if __name__ == "__main__":
    # results directory (assembled here but not used by fixed_point directly)
    results_dir = os.path.join(file_dir, "../results")
    # Alternative suites; enable by uncommenting:
    # variance()
    # correlation()
    fixed_point()
| 44.62069 | 89 | 0.519578 |
import numpy as np
import os, sys, pickle
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(file_dir)
from theory import depth
from viz import get_colours
from numpy_simulation import *
from utils import load_experiment
from theory import critical_point
def perform_experiment(experiments):
for i, experiment in enumerate(experiments):
dist = experiment['dist']
noise = experiment['noise']
act = experiment['act']
init = experiment['init']
noisy_signal_prop_simulations(dist, noise, act, init, seed=i)
def variance():
experiments = [
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"underflow"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"overflow"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"underflow"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"overflow"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
def correlation():
experiments = [
{"dist": "none", "noise": (None, None), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.8), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 2), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
def fixed_point():
experiments = [
{"dist": "bern", "noise": ('prob_1', 0.1), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.2), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.3), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.4), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.5), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.7), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.8), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.9), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.1), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.4), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.55), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.7), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.85), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.0), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.15), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.3), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.45), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.6), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.75), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.9), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
if __name__ == "__main__":
results_dir = os.path.join(file_dir, "../results")
fixed_point()
| true | true |
f724f3b40f32d0b3f43e1e0eb69678d13e641ccd | 872 | py | Python | python-threatexchange/threatexchange/extensions/text_tlsh/tests/test_tlsh_hash_and_match.py | dxdc/ThreatExchange | f9aff6dd0c90e6c47ffe4151bced4de1676d84f6 | [
"BSD-3-Clause"
] | null | null | null | python-threatexchange/threatexchange/extensions/text_tlsh/tests/test_tlsh_hash_and_match.py | dxdc/ThreatExchange | f9aff6dd0c90e6c47ffe4151bced4de1676d84f6 | [
"BSD-3-Clause"
] | null | null | null | python-threatexchange/threatexchange/extensions/text_tlsh/tests/test_tlsh_hash_and_match.py | dxdc/ThreatExchange | f9aff6dd0c90e6c47ffe4151bced4de1676d84f6 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from threatexchange.extensions.text_tlsh.text_tlsh import TextTLSHSignal
try:
import tlsh
_DISABLED = False
except ImportError:
_DISABLED = True
@unittest.skipIf(_DISABLED, "tlsh not installed")
class TLSHHasherModuleUnitTest(unittest.TestCase):

    def test_tlsh_from_string(self):
        """Long text yields a TLSH digest; too-short text yields ""."""
        cases = [
            (
                "A minimum string length must be 256 bytes! "
                "That's so much text this means it's not super "
                "useful for finding short text!",
                "T1DFB092A1724AC2C0D3CA48452291E"
                "A04A5B75EB903A6E7577A54118FFA8148E98F9426",
            ),
            ("too short", ""),
        ]
        for text, want in cases:
            got = TextTLSHSignal.hash_from_str(text)
            assert got == want, f"case: {text}"
| 31.142857 | 79 | 0.681193 |
import unittest
from threatexchange.extensions.text_tlsh.text_tlsh import TextTLSHSignal
try:
import tlsh
_DISABLED = False
except ImportError:
_DISABLED = True
@unittest.skipIf(_DISABLED, "tlsh not installed")
class TLSHHasherModuleUnitTest(unittest.TestCase):
def test_tlsh_from_string(self):
expected = {
"A minimum string length must be 256 bytes! "
"That's so much text this means it's not super "
"useful for finding short text!": "T1DFB092A1724AC2C0D3CA48452291E"
"A04A5B75EB903A6E7577A54118FFA8148E98F9426",
"too short": "",
}
for input, expected_hash in expected.items():
hashed = TextTLSHSignal.hash_from_str(input)
assert hashed == expected_hash, f"case: {input}"
| true | true |
f724f4175b844d00837287ebdccd736416103c4b | 4,382 | py | Python | jeremy/plotting/.Test4TESTESETES.py | jupsal/schmies-jTEM | 565696dbabc70adb50aaaab8f61fad0f91f123c0 | [
"BSD-2-Clause"
] | null | null | null | jeremy/plotting/.Test4TESTESETES.py | jupsal/schmies-jTEM | 565696dbabc70adb50aaaab8f61fad0f91f123c0 | [
"BSD-2-Clause"
] | null | null | null | jeremy/plotting/.Test4TESTESETES.py | jupsal/schmies-jTEM | 565696dbabc70adb50aaaab8f61fad0f91f123c0 | [
"BSD-2-Clause"
] | null | null | null | ###########################################################################
# This file holds the plotting routine for the KP solution from the Schmiesy
# Thesie. There is no timestepping, it plots only the initial condition.
############################################################################
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
from scipy import interpolate
def main():
    """Plot group data and KP solution for examples 0 through 3.

    Note: this file is Python 2 (print statement, xrange).
    """
    # Loop over all examples
    for exampleNum in xrange(0,3+1):
        print exampleNum
        cFileName, sFileName, gFileName = defFileNames( exampleNum );
        coordData, solnData, groupData = loadData(cFileName, sFileName,
                gFileName)
        createPlot(coordData, solnData, groupData, str(exampleNum))
    # Show all the plots at the end, after every example has been rendered.
    plt.show()
def defFileNames( exampleNum, base_dir=None ):
    """Return the (coords, soln, group) CSV paths for an example number.

    Parameters
    ----------
    exampleNum : int or str
        Example index; appended to each file stem before the '.csv' suffix.
    base_dir : str, optional
        Directory containing the CSV files (must end with a slash).  Defaults
        to the original hard-coded location so existing callers are unaffected.

    Returns
    -------
    tuple of str
        Paths to the coordinates, solution, and group CSV files.
    """
    if base_dir is None:
        # Historical default.  The doubled slash after 'jTEM-Jeremy' is kept
        # deliberately: it reproduces the original concatenation byte-for-byte.
        base_dir = ('/home/jeremy/Documents/research/RiemannSurfaces/'
                    'jTEM-Jeremy//jeremy/plotting/data/Test4/')
    suffix = str(exampleNum) + '.csv'
    coordfile = base_dir + 'coords' + suffix
    solsfile = base_dir + 'soln' + suffix
    groupfile = base_dir + 'group' + suffix
    return coordfile, solsfile, groupfile
def loadData( cFileName, sFileName, gFileName ):
    """Read the coordinate, solution, and group CSV files.

    Each file is parsed as a comma-delimited structured array whose field
    names come from the header row: coords hold (t, x, y) tuples; soln and
    group hold (real, imaginary)-style pairs.
    """
    def read_csv(path):
        # Header row supplies the structured-array field names.
        return np.genfromtxt(path, dtype=float, delimiter=',', names=True)

    return read_csv(cFileName), read_csv(sFileName), read_csv(gFileName)
def createPlot(coordData, solnData, groupData, exampleNum=' NOT GIVEN'):
    """Render the group-data circles and the KP solution surface in one figure.

    The figure is titled with exampleNum and saved as an EPS file.
    NOTE(review): the save path is hard-coded to a user-specific directory.
    """
    # Create both the KP Soln plot and the Group Data plot side-by-side
    fig = plt.figure()
    # First plot the group data (2-D axis, top)
    ax1 = plt.subplot(2,1,1)
    plotGroupData( groupData, ax1 )
    # Then plot the KP Soln (3-D surface axis, bottom)
    ax2 = plt.subplot(2,1,2, projection='3d')
    plotKP( coordData, solnData, ax2 )
    fig.suptitle('Example Number'+str(exampleNum))
    fig.savefig( '/home/jeremy/Documents/research/RiemannSurfaces/jTEM-Jeremy/jeremy/plotting/data/Test4/ExampleNum'
            + str(exampleNum) + '.eps', format = 'eps' )
def plotGroupData( groupData, ax ):
    """Draw the first two group circles (center + radius rows) on ax."""
    centers_re = np.array([row[0] for row in groupData])
    centers_im = np.array([row[1] for row in groupData])
    radii = np.array([row[2] for row in groupData])
    # Sample angles over one full revolution.
    theta = np.arange(0.0, 2 * np.pi, 0.1)

    def circle(j):
        # Parametrize circle j in the complex plane.
        return (centers_re[j] + radii[j] * np.cos(theta),
                centers_im[j] + radii[j] * np.sin(theta))

    for j in (0, 1):
        xs, ys = circle(j)
        ax.plot(xs, ys)
    ax.set_xlim([-6, 6])
    ax.set_ylim([-6, 6])
    plt.gca().set_aspect('equal')
def plotKP( coordData, solnData, ax ):
    """Plot the real-valued KP solution as a surface on the 3-D axis ax.

    coordData holds (t, x, y) rows and solnData holds (real, imaginary)
    rows; a ValueError is raised if any imaginary part is nonzero.
    """
    # Unpack the (t, x, y) coordinate columns.
    t = np.zeros(coordData.shape)
    x = np.zeros(coordData.shape)
    y = np.zeros(coordData.shape)
    j = 0;
    for txy in coordData:
        t[j] = txy[0]
        x[j] = txy[1]
        y[j] = txy[2]
        j = j+1;
    # Check that solution is entirely real-valued.  The previous test
    # (sum of imaginary parts > 0) missed negative or mutually cancelling
    # imaginary parts; test each entry individually instead.
    if any( sol[1] != 0 for sol in solnData ):
        raise ValueError("The solution has nonzero imaginary part.")
    realSoln = np.array( [ sol[0] for sol in solnData ] )
    tstep = len(t)
    # Find the xstep by checking for the value at which x changes from 0 to
    # nonzero. Since x changes last.
    xstep = np.where(x[:-1] != x[1:])[0][0] + 1
    # Right now this only works for evenly spaced square grids, i.e.
    # xstep == ystep and len(x) == xstep * ystep (assumed, not verified here).
    ystep = xstep
    z = realSoln[ 0:tstep ]
    # Put it on a grid.
    xx = x.reshape( (xstep, xstep) )
    yy = y.reshape( (ystep, ystep) )
    zz = z.reshape( xx.shape )
    surf = ax.plot_surface( xx, yy, zz, rstride=2, cstride=2, cmap=cm.coolwarm,
            linewidth = 0.2)#, antialiased = True )
    ax.set_zlim( np.min(realSoln), np.max(realSoln) )
    ax.set_xlabel('x'); ax.set_ylabel('y');
    #ax.set_title('Example Number'+exampleNum)
# Run the full plotting pipeline when invoked as a script.
if __name__ == '__main__':
    main()
| 36.823529 | 116 | 0.620037 | false | true | |
f724f5a468382942a4bfe330e8981878747ab446 | 342 | py | Python | backend/rumergy_backend/rumergy/serializers/data_log_measures_serializer.py | Firefly-Tech/rumergy-webapp | 859054bd9ee710a11b393027bb9cb1bad55d0f00 | [
"MIT"
] | 1 | 2021-11-08T00:28:37.000Z | 2021-11-08T00:28:37.000Z | backend/rumergy_backend/rumergy/serializers/data_log_measures_serializer.py | Firefly-Tech/rumergy-webapp | 859054bd9ee710a11b393027bb9cb1bad55d0f00 | [
"MIT"
] | 1 | 2021-11-02T02:17:37.000Z | 2021-11-02T02:17:37.000Z | backend/rumergy_backend/rumergy/serializers/data_log_measures_serializer.py | Firefly-Tech/rumergy-webapp | 859054bd9ee710a11b393027bb9cb1bad55d0f00 | [
"MIT"
] | 1 | 2021-10-18T22:27:04.000Z | 2021-10-18T22:27:04.000Z | from rumergy_backend.rumergy.models import DataLogMeasures
from rest_framework import serializers
class DataLogMeasuresSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing individual measurements of a data log.

    Serializes DataLogMeasures rows: the owning data_log, the measured
    data_point, its value and timestamp, and the measurement status.
    """

    class Meta:
        # Model-backed serializer; `fields` defines the public API shape.
        model = DataLogMeasures
        fields = ["id", "data_log", "data_point", "value", "timestamp", "status"]
| 31.090909 | 81 | 0.736842 | from rumergy_backend.rumergy.models import DataLogMeasures
from rest_framework import serializers
class DataLogMeasuresSerializer(serializers.ModelSerializer):
class Meta:
model = DataLogMeasures
fields = ["id", "data_log", "data_point", "value", "timestamp", "status"]
| true | true |
f724f7cfee52c3eaa2ba94c61fd8676dd873a730 | 56,226 | py | Python | python/paddle/nn/layer/conv.py | SmirnovKol/Paddle | a3730dc87bc61593514b830727e36e5d19e753cd | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/nn/layer/conv.py | SmirnovKol/Paddle | a3730dc87bc61593514b830727e36e5d19e753cd | [
"Apache-2.0"
] | null | null | null | python/paddle/nn/layer/conv.py | SmirnovKol/Paddle | a3730dc87bc61593514b830727e36e5d19e753cd | [
"Apache-2.0"
] | 1 | 2021-09-24T11:23:36.000Z | 2021-09-24T11:23:36.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define classes of convolutional neural network
import numpy as np
from paddle import get_flags
from ...device import get_cudnn_version
from .. import Layer
from ..initializer import Normal
from .. import functional as F
from ...fluid.layers import utils
from ..functional.conv import _update_padding_nd
from ...device import is_compiled_with_cuda
from ...device import is_compiled_with_rocm
__all__ = []
def _get_default_param_initializer(num_channels, filter_size):
    """Build the default Normal weight initializer for a conv layer.

    The standard deviation is sqrt(2 / fan_in), where fan_in is the number
    of input channels times the number of kernel elements.
    """
    fan_in = num_channels * np.prod(filter_size)
    return Normal(0.0, (2.0 / fan_in) ** 0.5)
def _reverse_repeat_list(t, n):
"""Reverse the order of `t` and repeat each element for `n` times.
This can be used to translate padding arg used by Conv and Pooling modules
to the ones used by `F.pad`.
"""
return list(x for x in reversed(t) for _ in range(n))
class _ConvNd(Layer):
    """Shared base class for the Conv1D/2D/3D layers and their transposes.

    Validates and normalizes the constructor arguments (padding mode, data
    format, stride/dilation/kernel-size lists), creates the ``weight`` and
    ``bias`` parameters, and selects the op type and cuDNN usage.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 transposed,
                 dims,
                 stride=1,
                 padding=0,
                 padding_mode='zeros',
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(_ConvNd, self).__init__()
        assert weight_attr is not False, "weight_attr should not be False in Conv."
        self._param_attr = weight_attr
        self._bias_attr = bias_attr
        self._groups = groups
        self._in_channels = in_channels
        self._out_channels = out_channels
        self._data_format = data_format

        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError(
                "padding_mode must be one of {}, but got padding_mode='{}'".
                format(valid_padding_modes, padding_mode))

        # Non-zero padding modes are applied with an explicit pad op, which
        # only supports a single symmetric int amount here.
        # FIX: the original checked `isinstance(padding, np.int)`; `np.int`
        # was an alias of the builtin `int` and was removed in NumPy 1.24,
        # so this raised AttributeError under modern NumPy.  Checking the
        # builtin `int` is behaviorally identical on all NumPy versions.
        if padding_mode in {'reflect', 'replicate', 'circular'
                            } and not isinstance(padding, int):
            raise TypeError(
                "when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int"
            )

        valid_format = {'NHWC', 'NCHW', 'NDHWC', 'NCDHW', 'NLC', 'NCL'}
        if data_format not in valid_format:
            raise ValueError(
                "data_format must be one of {}, but got data_format='{}'".
                format(valid_format, data_format))

        channel_last = (data_format == "NHWC") or (data_format
                                                   == "NDHWC") or (data_format
                                                                   == "NLC")
        # Channel axis is last for NHWC/NDHWC/NLC, otherwise axis 1.
        if channel_last:
            self._channel_dim = len(data_format) - 1
        else:
            self._channel_dim = 1

        self._stride = utils.convert_to_list(stride, dims, 'stride')
        self._dilation = utils.convert_to_list(dilation, dims, 'dilation')
        self._kernel_size = utils.convert_to_list(kernel_size, dims,
                                                  'kernel_size')
        self._padding = padding
        self._padding_mode = padding_mode
        self.output_padding = output_padding
        if dims != 1:
            self._updated_padding, self._padding_algorithm = _update_padding_nd(
                padding, channel_last, dims)

        if transposed:
            # Transposed conv filter layout: [C_in, C_out // groups, *kernel].
            filter_shape = [self._in_channels, out_channels // groups
                            ] + self._kernel_size
        else:
            if in_channels % groups != 0:
                raise ValueError("in_channels must be divisible by groups.")

            if padding_mode in {'reflect', 'replicate', 'circular'}:
                # Pre-compute the per-edge padding list used by F.pad in
                # forward(), and let the conv op itself use zero padding.
                _paired_padding = utils.convert_to_list(padding, dims,
                                                        'padding')
                self._reversed_padding_repeated_twice = _reverse_repeat_list(
                    _paired_padding, 2)

                self._updated_padding, self._padding_algorithm = _update_padding_nd(
                    0, channel_last, dims)

            # Regular conv filter layout: [C_out, C_in // groups, *kernel].
            filter_shape = [out_channels, in_channels // groups
                            ] + self._kernel_size

        def _get_default_param_initializer():
            # Transposed convs use the framework default initializer.
            if transposed:
                return None
            filter_elem_num = np.prod(self._kernel_size) * self._in_channels
            std = (2.0 / filter_elem_num)**0.5
            return Normal(0.0, std)

        self.weight = self.create_parameter(
            shape=filter_shape,
            attr=self._param_attr,
            default_initializer=_get_default_param_initializer())
        self.bias = self.create_parameter(attr=self._bias_attr,
                                          shape=[self._out_channels],
                                          is_bias=True)

        cudnn_version = get_cudnn_version()

        self._use_cudnn = True if (is_compiled_with_cuda()
                                   and cudnn_version is not None) else False

        self._op_type = "conv" + str(dims) + 'd'
        # 2-D convs where each input channel forms its own group map to the
        # specialized depthwise kernel (cuDNN only used for it on ROCm).
        if self._op_type == 'conv2d' and (in_channels == groups
                                          and in_channels != 1
                                          and out_channels % in_channels == 0):
            self._op_type = 'depthwise_conv2d'
            if is_compiled_with_rocm():
                self._use_cudnn = True
            else:
                self._use_cudnn = False

        # Global flag can force cuDNN off for conv2d.
        if (is_compiled_with_cuda() and get_flags("FLAGS_conv2d_disable_cudnn")
            ["FLAGS_conv2d_disable_cudnn"]):
            self._use_cudnn = False

    def extra_repr(self):
        # Only mention settings that differ from their defaults.
        main_str = '{_in_channels}, {_out_channels}, kernel_size={_kernel_size}'
        if self._stride != [1] * len(self._stride):
            main_str += ', stride={_stride}'
        if self._padding != 0:
            main_str += ', padding={_padding}'
        if self._padding_mode != 'zeros':
            main_str += ', padding_mode={_padding_mode}'
        if self.output_padding != 0:
            main_str += ', output_padding={output_padding}'
        if self._dilation != [1] * len(self._dilation):
            main_str += ', dilation={_dilation}'
        if self._groups != 1:
            main_str += ', groups={_groups}'
        main_str += ', data_format={_data_format}'
        return main_str.format(**self.__dict__)
class Conv1D(_ConvNd):
    r"""One-dimensional convolution layer.

    Computes a 1-D convolution over an input in ``NCL`` format
    ``(batch, channels, length)`` or ``NLC`` format
    ``(batch, length, channels)``:

    .. math::

        Out = \sigma (W \ast X + b)

    where :math:`W` is the filter tensor of shape
    ``[out_channels, in_channels // groups, kernel_size]``, :math:`b` the
    optional bias of shape ``[out_channels]``, :math:`\ast` the convolution
    operation and :math:`\sigma` an optional activation.  The output length is

    .. math::

        L_{out} = \frac{L_{in} + 2 \cdot padding - (dilation \cdot (K - 1) + 1)}{stride} + 1

    Parameters:
        in_channels(int): The number of channels in the input image.
        out_channels(int): The number of filters (output channels).
        kernel_size(int|tuple|list): The filter size; a tuple/list must
            contain one integer.
        stride(int|tuple|list, optional): The stride size. Default: 1.
        padding(int|str|tuple|list, optional): Zero padding: a string in
            ['valid', 'same'], an int applied to both sides, or a
            one-element list/tuple. Default: 0.
        dilation(int|tuple|list, optional): The dilation size. Default: 1.
        groups(int, optional): Number of blocked connections from input
            channels to output channels. Default: 1.
        padding_mode(str, optional): One of 'zeros', 'reflect', 'replicate'
            or 'circular'. Default: 'zeros'.
        weight_attr(ParamAttr, optional): Attribute for the learnable
            weight; None uses the default Normal(0, sqrt(2/fan_in))
            initializer. Default: None.
        bias_attr(ParamAttr|bool, optional): Attribute for the learnable
            bias; False disables the bias. Default: None.
        data_format(str, optional): "NCL" or "NLC". Default: "NCL".

    Attribute:
        **weight** (Parameter): the learnable weights of filter of this layer.

        **bias** (Parameter or None): the learnable bias of this layer.

    Shape:
        - x: 3-D tensor with shape: (batch, in_channels, length) or (batch, length, in_channels).
        - weight: 3-D tensor with shape: (out_channels, in_channels, kernel_size)
        - bias: 1-D tensor with shape: (out_channels)
        - output: 3-D tensor with same shape as input x.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.nn import Conv1D
            import numpy as np

            x = np.array([[[4, 8, 1, 9],
                           [7, 2, 0, 9],
                           [6, 9, 2, 6]]]).astype(np.float32)
            w = np.array(
                [[[9, 3, 4],
                  [0, 0, 7],
                  [2, 5, 6]],
                 [[0, 3, 4],
                  [2, 9, 7],
                  [5, 6, 8]]]).astype(np.float32)

            x_t = paddle.to_tensor(x)
            conv = Conv1D(3, 2, 3)
            conv.weight.set_value(w)
            y_t = conv(x_t)
            print(y_t)
            # [[[133. 238.]
            #   [160. 211.]]]
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        # All validation and parameter creation is handled by _ConvNd;
        # transposed=False and dims=1 select the plain 1-D convolution.
        super(Conv1D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,
                                     1,
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)

    def forward(self, x):
        if self._padding_mode == "zeros":
            # Zero padding is performed by the conv kernel itself.
            conv_padding = self._padding
        else:
            # Other padding modes are realized with an explicit pad first,
            # after which the conv itself runs unpadded.
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
            conv_padding = 0
        return F.conv1d(x,
                        self.weight,
                        bias=self.bias,
                        padding=conv_padding,
                        stride=self._stride,
                        dilation=self._dilation,
                        groups=self._groups,
                        data_format=self._data_format)
class Conv1DTranspose(_ConvNd):
    r"""
    This interface is used to construct a callable object of the ``Conv1DTranspose`` class.
    For more details, refer to code examples.
    The 1-D convolution transpose layer calculates the output based on the input,
    filter, and dilation, stride, padding. Input(Input) and output(Output)
    are in 'NCL' format or 'NLC' where N is batch size, C is the number of channels,
    L is the length of the feature. The details of convolution transpose
    layer, please refer to the following explanation and references
    `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
    If bias attribution and activation type are provided, bias is added to
    the output of the convolution, and the corresponding activation function
    is applied to the final result.
    For each input :math:`X`, the equation is:
    .. math::
        Out = \sigma (W \ast X + b)
    Where:
    * :math:`X`: Input value, a 3-D Tensor with 'NCL' format or 'NLC' format.
    * :math:`W`: Kernel value, a 3-D Tensor with 'MCK' format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 1-D Tensor with shape [M].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, a 3-D Tensor with data format 'NCL' of 'NLC', the shape of :math:`Out` and :math:`X` may be different.
    Example:
        - Input:
          Input shape: :math:`(N, C_{in}, L_{in})`
          Filter shape: :math:`(C_{in}, C_{out}, L_f)`
        - Output:
          Output shape: :math:`(N, C_{out}, L_{out})`
    Where
    .. math::
        L^\prime_{out} &= (L_{in} - 1) * stride - pad\_left - pad\_right + dilation * (L_f - 1) + 1 \\\\
        L_{out} &\in [ L^\prime_{out}, L^\prime_{out} + stride ]
    Note:
        The conv1d_transpose can be seen as the backward of the conv1d. For conv1d,
        when stride > 1, conv1d maps multiple input shape to the same output shape,
        so for conv1d_transpose, when stride > 1, input shape maps multiple output shape.
        If output_size is None, :math:`L_{out} = L^\prime_{out}`;
        else, the :math:`L_{out}` of the output size must between :math:`L^\prime_{out}`
        and :math:`L^\prime_{out} + stride`.
    Args:
        in_channels(int): The number of channels in the input image.
        out_channels(int): The number of the filter. It is as same as the output
            feature map.
        kernel_size(int|tuple|list): The filter size. If kernel_size is a tuple/list,
            it must contain one integer, (kernel_size).
        stride(int|tuple|list, optional): The stride size. It means the stride in transposed convolution.
            If stride is a tuple/list, it must contain one integer, (stride_size).
            Default: stride = 1.
        padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds
            `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a
            string, either 'VALID' or 'SAME' supported, which is the padding algorithm.
            If `padding` is a tuple or list, it could be in two forms:
            `[pad]` or `[pad_left, pad_right]`. Default: padding = 0.
        output_padding(int|list|tuple, optional): The count of zeros to be added to tail of each dimension.
            If it is a tuple/list, it must contain one integer. Default: 0.
        groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
            when group=2, the first half of the filters is only connected to the
            first half of the input channels, while the second half of the
            filters is only connected to the second half of the input channels.
            Default: groups = 1.
        bias(bool, optional): Whether to use bias. Default: True.
        dilation(int|tuple|list, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a tuple/list, it must contain one integer, (dilation_size).
            Default: dilation = 1.
        weight_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv1d_transpose. If it is set to None or one attribute of ParamAttr, conv1d_transpose
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv1d_transpose.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv1d_transpose
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
    Attribute:
        **weight** (Parameter): the learnable weights of filters of this layer.
        **bias** (Parameter or None): the learnable bias of this layer.
    Shape:
        - x(Tensor): 3-D tensor with shape (batch, in_channels, length) when data_format is "NCL" or shape (batch, length, in_channels) when data_format is "NLC".
        - weight(Tensor): 3-D tensor with shape (in_channels, out_channels, kernel_length).
        - bias(Tensor): 1-D tensor with shape (out_channels).
        - output_size(int|tuple|list, optional): The output image size. If output size is a tuple/list, it must contain one integer, (feature_length). None if use kernel_size, padding, output_padding and stride to calculate output_size. If output_size and kernel_size are specified at the same time, They should follow the formula above. Default: None. output_size and kernel_size should not be None at the same time.
        - output(Tensor): 3-D tensor with same shape as input x.
    Examples:
        .. code-block:: python
           import paddle
           from paddle.nn import Conv1DTranspose
           import numpy as np
           # shape: (1, 2, 4)
           x=np.array([[[4, 0, 9, 7],
                        [8, 0, 9, 2]]]).astype(np.float32)
           # shape: (2, 1, 2)
           y=np.array([[[7, 0]],
                       [[4, 2]]]).astype(np.float32)
           x_t = paddle.to_tensor(x)
           conv = Conv1DTranspose(2, 1, 2)
           conv.weight.set_value(y)
           y_t = conv(x_t)
           print(y_t)
           # [[[60. 16. 99. 75.  4.]]]
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 dilation=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        # transposed=True, dims=1: the shared _ConvNd base creates the weight
        # (in_channels, out_channels // groups, kernel_size) and bias.
        super(Conv1DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              True,
                                              1,
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        # Consistent with Conv2DTranspose/Conv3DTranspose: an explicit
        # output_size takes precedence, so the stored output_padding is
        # suppressed (the functional op rejects both being set at once).
        if output_size is None:
            output_padding = self.output_padding
        else:
            output_padding = 0
        out = F.conv1d_transpose(x,
                                 self.weight,
                                 bias=self.bias,
                                 output_size=output_size,
                                 output_padding=output_padding,
                                 padding=self._padding,
                                 stride=self._stride,
                                 dilation=self._dilation,
                                 groups=self._groups,
                                 data_format=self._data_format)
        return out
class Conv2D(_ConvNd):
    r"""
    This interface is used to construct a callable object of the ``Conv2D`` class.
    For more details, refer to code examples.
    The convolution2D layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input and
    Output are in NCHW format, where N is batch size, C is the number of
    the feature map, H is the height of the feature map, and W is the width of the feature map.
    Filter's shape is [MCHW] , where M is the number of output feature map,
    C is the number of input feature map, H is the height of the filter,
    and W is the width of the filter. If the groups is greater than 1,
    C will equal the number of input feature map divided by the groups.
    Please refer to UFLDL's `convolution
    <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
    for more details.
    If bias attribution and activation type are provided, bias is added to the
    output of the convolution, and the corresponding activation function is
    applied to the final result.
    For each input :math:`X`, the equation is:
    .. math::
        Out = \sigma (W \ast X + b)
    Where:
    * :math:`X`: Input value, a ``Tensor`` with NCHW format.
    * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 1-D ``Tensor`` with shape [M].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
    Parameters:
        in_channels(int): The number of input channels in the input image.
        out_channels(int): The number of output channels produced by the convolution.
        kernel_size(int|list|tuple): The size of the convolving kernel.
        stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. The default value is 1.
        padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.
            1. a string in ['valid', 'same'].
            2. an int, which means each spatial dimension(height, width) is zero padded by size of `padding`
            3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].
            4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.
            5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
            The default value is 0.
        dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. The default value is 1.
        groups(int, optional): The groups number of the Conv2D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. The default value is 1.
        padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.
        weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as param_attr. If it is set to None, the parameter
            is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
            :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv2d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. The default value is None.
        data_format(str, optional): Data format that specifies the layout of input.
            It can be "NCHW" or "NHWC". Default: "NCHW".
    Attribute:
        **weight** (Parameter): the learnable weights of filter of this layer.
        **bias** (Parameter or None): the learnable bias of this layer.
    Shape:
        - x: :math:`(N, C_{in}, H_{in}, W_{in})`
        - weight: :math:`(C_{out}, C_{in}, K_{h}, K_{w})`
        - bias: :math:`(C_{out})`
        - output: :math:`(N, C_{out}, H_{out}, W_{out})`
    Where
    .. math::
        H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1
        W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1
    Examples:
        .. code-block:: python
          import paddle
          import paddle.nn as nn
          paddle.disable_static()
          x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
          conv = nn.Conv2D(4, 6, (3, 3))
          y_var = conv(x_var)
          y_np = y_var.numpy()
          print(y_np.shape)
          # (2, 6, 6, 6)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        # transposed=False, dims=2: the shared _ConvNd base creates the
        # weight (out_channels, in_channels // groups, kH, kW) and bias.
        super(Conv2D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,
                                     2,
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)

    def forward(self, x):
        # Non-'zeros' modes are emulated with an explicit pad; the conv
        # itself then runs with the padding already resolved by _ConvNd.
        if self._padding_mode != 'zeros':
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
        out = F.conv._conv_nd(x,
                              self.weight,
                              bias=self.bias,
                              stride=self._stride,
                              padding=self._updated_padding,
                              padding_algorithm=self._padding_algorithm,
                              dilation=self._dilation,
                              groups=self._groups,
                              data_format=self._data_format,
                              channel_dim=self._channel_dim,
                              op_type=self._op_type,
                              use_cudnn=self._use_cudnn)
        return out
class Conv2DTranspose(_ConvNd):
    r"""
    This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
    For more details, refer to code examples.
    The convolution2D transpose layer calculates the output based on the input,
    filter, and dilations, strides, paddings. Input and output
    are in NCHW format. Where N is batch size, C is the number of feature map,
    H is the height of the feature map, and W is the width of the feature map.
    Filter's shape is [CMHW] , where C is the number of input feature map,
    M is the number of output feature map, H is the height of the filter,
    and W is the width of the filter. If the groups is greater than 1,
    C will equal the number of input feature map divided by the groups.
    If bias attribution and activation type are provided, bias is added to
    the output of the convolution, and the corresponding activation function
    is applied to the final result.
    The details of convolution transpose layer, please refer to the following explanation and references
    `conv2dtranspose <https://arxiv.org/pdf/1603.07285.pdf>`_ .
    For each input :math:`X`, the equation is:
    .. math::
        Out = \sigma (W \ast X + b)
    Where:
    * :math:`X`: Input value, a ``Tensor`` with NCHW format.
    * :math:`W`: Filter value, a ``Tensor`` with shape [CMHW] .
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 1-D ``Tensor`` with shape [M].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
    Parameters:
        in_channels(int): The number of channels in the input image.
        out_channels(int): The number of channels produced by the convolution.
        kernel_size(int|list|tuple): The kernel size. If kernel_size is a list/tuple,
            it must contain two integers, (kernel_size_H, kernel_size_W).
            Otherwise, the kernel will be a square.
        stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. Default: 1.
        padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.
            1. a string in ['valid', 'same'].
            2. an int, which means each spatial dimension(height, width) is zero padded by size of `padding` on both sides
            3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].
            4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.
            5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
            The default value is 0.
        output_padding(int|list|tuple, optional): Additional size added to one side
            of each dimension in the output shape. Default: 0.
        dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. Default: 1.
        groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
            when group=2, the first half of the filters is only connected to the
            first half of the input channels, while the second half of the
            filters is only connected to the second half of the input channels.
            Default: 1.
        weight_attr(ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
            of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr(ParamAttr|bool, optional): The attribute for the bias of conv2d_transpose.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d_transpose
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        data_format(str, optional): Data format that specifies the layout of input.
            It can be "NCHW" or "NHWC". Default: "NCHW".
    Attribute:
        **weight** (Parameter): the learnable weights of filters of this layer.
        **bias** (Parameter or None): the learnable bias of this layer.
    Shape:
        - x: :math:`(N, C_{in}, H_{in}, W_{in})`
        - weight: :math:`(C_{in}, C_{out}, K_{h}, K_{w})`
        - bias: :math:`(C_{out})`
        - output: :math:`(N, C_{out}, H_{out}, W_{out})`
    Where
    .. math::
        H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\_size[0] - 1) + 1
        W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\_size[1] - 1) + 1
        H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] )
        W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
    Examples:
       .. code-block:: python
          import paddle
          import paddle.nn as nn
          paddle.disable_static()
          x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
          conv = nn.Conv2DTranspose(4, 6, (3, 3))
          y_var = conv(x_var)
          y_np = y_var.numpy()
          print(y_np.shape)
          # (2, 6, 10, 10)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        # transposed=True, dims=2: the shared _ConvNd base creates the
        # weight (in_channels, out_channels // groups, kH, kW) and bias.
        super(Conv2DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              True,
                                              2,
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        # An explicit output_size takes precedence, so the stored
        # output_padding is suppressed (the functional op rejects both).
        if output_size is None:
            output_padding = self.output_padding
        else:
            output_padding = 0
        out = F.conv2d_transpose(x,
                                 self.weight,
                                 bias=self.bias,
                                 padding=self._padding,
                                 output_padding=output_padding,
                                 stride=self._stride,
                                 dilation=self._dilation,
                                 groups=self._groups,
                                 output_size=output_size,
                                 data_format=self._data_format)
        return out
class Conv3D(_ConvNd):
    r"""
    **Convolution3D Layer**
    The convolution3D layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are multidimensional tensors with a shape of
    :math:`[N, C, D, H, W]` . Where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar with Convolution2D
    but adds one dimension(depth). If bias attribution and activation type are
    provided, bias is added to the output of the convolution, and the
    corresponding activation function is applied to the final result.
    For each input :math:`X`, the equation is:
    .. math::
        Out = \sigma (W \ast X + b)
    In the above equation:
    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
    Parameters:
        in_channels(int): The number of input channels in the input image.
        out_channels(int): The number of output channels produced by the convolution.
        kernel_size(int|list|tuple): The size of the convolving kernel.
        stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
            contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
            stride_D = stride_H = stride_W = stride. The default value is 1.
        padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.
            1. a string in ['valid', 'same'].
            2. an int, which means each spatial dimension(depth, height, width) is zero padded by size of `padding`
            3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].
            4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.
            5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
            The default value is 0.
        dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
            contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
            dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
        groups(int, optional): The groups number of the Conv3D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. The default value is 1.
        padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.
        weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as param_attr. If it is set to None, the parameter
            is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
            :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. The default value is None.
        data_format(str, optional): Data format that specifies the layout of input.
            It can be "NCDHW" or "NDHWC". Default: "NCDHW".
    Attribute:
        **weight** (Parameter): the learnable weights of filters of this layer.
        **bias** (Parameter): the learnable bias of this layer.
    Shape:
        - x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
        - weight: :math:`(C_{out}, C_{in}, K_{d}, K_{h}, K_{w})`
        - bias: :math:`(C_{out})`
        - output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
    Where
    .. math::
        D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1
        H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1
        W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (kernel\_size[2] - 1) + 1))}{strides[2]} + 1
    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding and
            groups mismatch.
    Examples:
        .. code-block:: python
          import paddle
          import paddle.nn as nn
          paddle.disable_static()
          x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
          conv = nn.Conv3D(4, 6, (3, 3, 3))
          y_var = conv(x_var)
          y_np = y_var.numpy()
          print(y_np.shape)
          # (2, 6, 6, 6, 6)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        # transposed=False, dims=3: the shared _ConvNd base creates the
        # weight (out_channels, in_channels // groups, kD, kH, kW) and bias.
        super(Conv3D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,
                                     3,
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)

    def forward(self, x):
        # Non-'zeros' modes are emulated with an explicit pad; the conv
        # itself then runs with the padding already resolved by _ConvNd.
        if self._padding_mode != 'zeros':
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
        out = F.conv._conv_nd(x,
                              self.weight,
                              bias=self.bias,
                              stride=self._stride,
                              padding=self._updated_padding,
                              padding_algorithm=self._padding_algorithm,
                              dilation=self._dilation,
                              groups=self._groups,
                              data_format=self._data_format,
                              channel_dim=self._channel_dim,
                              op_type=self._op_type,
                              use_cudnn=self._use_cudnn)
        return out
class Conv3DTranspose(_ConvNd):
r"""
**Convlution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
is the width of the feature. Parameters(dilations, strides, paddings) are
two elements. These two elements represent height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW format.
* :math:`W`: Filter value, a tensor with CMDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 1-D tensor with shape [M].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
**Note**:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shape to the same output shape,
so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
If output_size is None, :math:`H_{out} = H^\prime_{out}, :math:`H_{out} = \
H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
size must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Parameters:
in_channels(int): The number of channels in the input image.
out_channels(int): The number of channels produced by the convolution.
kernel_size(int|list|tuple): The kernel size. If kernel_size is a list/tuple,
it must contain three integers, (kernel_size_D, kernel_size_H, kernel_size_W).
Otherwise, the kernel will be a square.
stride(int|list|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a list/tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
The default value is 1.
padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.
1. a string in ['valid', 'same'].
2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding`
3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].
4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.
5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
The default value is 0.
output_padding(int|list|tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0.
dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
The default value is 1.
weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default value is None.
bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
data_format(str, optional): Data format that specifies the layout of input.
It can be "NCDHW" or "NDHWC". Default: "NCDHW".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Shape:
- x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
- weight: :math:`(C_{in}, C_{out}, K_{d}, K_{h}, K_{w})`
- bias: :math:`(C_{out})`
- output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\_size[0] - 1) + 1
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\_size[1] - 1) + 1
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (kernel\_size[2] - 1) + 1
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
y_var = conv(x_var)
y_np = y_var.numpy()
print(y_np.shape)
# (2, 6, 10, 10, 10)
"""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        """Build a 3D transposed-convolution layer.

        All state setup is delegated to the shared ``_ConvNd`` base; the
        two positional literals below mark this layer as transposed
        (``True``) and three-dimensional (``3``).
        """
        super(Conv3DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              True,
                                              3,
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)
    def forward(self, x, output_size=None):
        """Apply the transposed 3D convolution to ``x``.

        :param x: input tensor laid out per ``data_format`` (NCDHW/NDHWC).
        :param output_size: optional explicit output spatial size; when
            given it takes precedence over the stored ``output_padding``.
        """
        if output_size is None:
            output_padding = self.output_padding
        else:
            # output_size and output_padding are two mutually exclusive
            # ways of pinning the output shape; the explicit size wins.
            output_padding = 0
        out = F.conv3d_transpose(x,
                                 self.weight,
                                 bias=self.bias,
                                 padding=self._padding,
                                 output_padding=output_padding,
                                 stride=self._stride,
                                 dilation=self._dilation,
                                 groups=self._groups,
                                 output_size=output_size,
                                 data_format=self._data_format)
        return out
| 47.974403 | 417 | 0.571426 |
import numpy as np
from paddle import get_flags
from ...device import get_cudnn_version
from .. import Layer
from ..initializer import Normal
from .. import functional as F
from ...fluid.layers import utils
from ..functional.conv import _update_padding_nd
from ...device import is_compiled_with_cuda
from ...device import is_compiled_with_rocm
__all__ = []
def _get_default_param_initializer(num_channels, filter_size):
    """Return a Normal initializer with std = sqrt(2 / fan_in), where
    fan_in is the number of weights feeding one output element."""
    fan_in = num_channels * np.prod(filter_size)
    return Normal(0.0, (2.0 / fan_in)**0.5)
def _reverse_repeat_list(t, n):
return list(x for x in reversed(t) for _ in range(n))
class _ConvNd(Layer):
    """Shared base for the Conv*/Conv*Transpose layers in this module.

    Validates the configuration, normalizes stride/dilation/kernel_size
    into per-dimension lists, creates the ``weight``/``bias`` parameters,
    and selects the backend op (plain conv / depthwise / cuDNN) that the
    subclasses' ``forward`` will dispatch to.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 transposed,
                 dims,
                 stride=1,
                 padding=0,
                 padding_mode='zeros',
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(_ConvNd, self).__init__()
        assert weight_attr is not False, "weight_attr should not be False in Conv."
        self._param_attr = weight_attr
        self._bias_attr = bias_attr
        self._groups = groups
        self._in_channels = in_channels
        self._out_channels = out_channels
        self._data_format = data_format

        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError(
                "padding_mode must be one of {}, but got padding_mode='{}'".
                format(valid_padding_modes, padding_mode))

        # Non-zero padding modes are implemented by pre-padding the input
        # in forward(); that path only supports a single symmetric int
        # amount.  FIX: use the builtin ``int`` here -- ``np.int`` was a
        # deprecated alias of it and was removed in NumPy 1.24, which made
        # this line raise AttributeError at construction time.
        if padding_mode in {'reflect', 'replicate', 'circular'
                            } and not isinstance(padding, int):
            raise TypeError(
                "when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int"
            )

        valid_format = {'NHWC', 'NCHW', 'NDHWC', 'NCDHW', 'NLC', 'NCL'}
        if data_format not in valid_format:
            raise ValueError(
                "data_format must be one of {}, but got data_format='{}'".
                format(valid_format, data_format))

        channel_last = (data_format == "NHWC") or (data_format
                                                   == "NDHWC") or (data_format
                                                                   == "NLC")
        if channel_last:
            self._channel_dim = len(data_format) - 1
        else:
            self._channel_dim = 1

        self._stride = utils.convert_to_list(stride, dims, 'stride')
        self._dilation = utils.convert_to_list(dilation, dims, 'dilation')
        self._kernel_size = utils.convert_to_list(kernel_size, dims,
                                                  'kernel_size')
        self._padding = padding
        self._padding_mode = padding_mode
        self.output_padding = output_padding
        if dims != 1:
            self._updated_padding, self._padding_algorithm = _update_padding_nd(
                padding, channel_last, dims)

        if transposed:
            filter_shape = [self._in_channels, out_channels // groups
                            ] + self._kernel_size
        else:
            if in_channels % groups != 0:
                raise ValueError("in_channels must be divisible by groups.")
            if padding_mode in {'reflect', 'replicate', 'circular'}:
                _paired_padding = utils.convert_to_list(padding, dims,
                                                        'padding')
                self._reversed_padding_repeated_twice = _reverse_repeat_list(
                    _paired_padding, 2)
                # The input will be padded explicitly in forward(), so the
                # conv op itself must run with zero padding.
                self._updated_padding, self._padding_algorithm = _update_padding_nd(
                    0, channel_last, dims)
            filter_shape = [out_channels, in_channels // groups
                            ] + self._kernel_size

        def _get_default_param_initializer():
            # sqrt(2 / fan_in) Normal init for forward convs; transposed
            # convs fall back to the framework default (None).
            if transposed:
                return None
            filter_elem_num = np.prod(self._kernel_size) * self._in_channels
            std = (2.0 / filter_elem_num)**0.5
            return Normal(0.0, std)

        self.weight = self.create_parameter(
            shape=filter_shape,
            attr=self._param_attr,
            default_initializer=_get_default_param_initializer())
        self.bias = self.create_parameter(attr=self._bias_attr,
                                          shape=[self._out_channels],
                                          is_bias=True)

        cudnn_version = get_cudnn_version()
        self._use_cudnn = (is_compiled_with_cuda()
                           and cudnn_version is not None)
        self._op_type = "conv" + str(dims) + 'd'
        # A conv2d where every input channel forms its own group is a
        # depthwise convolution, which has a dedicated op (cuDNN is only
        # used for it on ROCm builds).
        if self._op_type == 'conv2d' and (in_channels == groups
                                          and in_channels != 1
                                          and out_channels % in_channels == 0):
            self._op_type = 'depthwise_conv2d'
            if is_compiled_with_rocm():
                self._use_cudnn = True
            else:
                self._use_cudnn = False
        if (is_compiled_with_cuda() and get_flags("FLAGS_conv2d_disable_cudnn")
            ["FLAGS_conv2d_disable_cudnn"]):
            self._use_cudnn = False

    def extra_repr(self):
        """Summarize the non-default configuration for ``repr()``."""
        main_str = '{_in_channels}, {_out_channels}, kernel_size={_kernel_size}'
        if self._stride != [1] * len(self._stride):
            main_str += ', stride={_stride}'
        if self._padding != 0:
            main_str += ', padding={_padding}'
        if self._padding_mode != 'zeros':
            main_str += ', padding_mode={_padding_mode}'
        if self.output_padding != 0:
            main_str += ', output_padding={output_padding}'
        if self._dilation != [1] * len(self._dilation):
            main_str += ', dilation={_dilation}'
        if self._groups != 1:
            main_str += ', groups={_groups}'
        main_str += ', data_format={_data_format}'
        return main_str.format(**self.__dict__)
class Conv1D(_ConvNd):
    """1D convolution layer over (N, C, L) / (N, L, C) inputs.

    Configures the shared ``_ConvNd`` base for a non-transposed,
    one-dimensional convolution and dispatches to ``F.conv1d``.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        # Positional literals: transposed=False, dims=1.
        super(Conv1D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,
                                     1,
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)
    def forward(self, x):
        """Run the convolution on ``x``."""
        padding = 0
        if self._padding_mode != "zeros":
            # Non-zero padding modes pre-pad the input explicitly, so the
            # conv op itself runs with zero padding.
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
        else:
            padding = self._padding
        out = F.conv1d(x,
                       self.weight,
                       bias=self.bias,
                       padding=padding,
                       stride=self._stride,
                       dilation=self._dilation,
                       groups=self._groups,
                       data_format=self._data_format)
        return out
class Conv1DTranspose(_ConvNd):
    """1D transposed convolution layer.

    Thin wrapper that configures the shared ``_ConvNd`` base for a
    transposed, one-dimensional convolution and dispatches the actual
    computation to ``F.conv1d_transpose``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 dilation=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        super(Conv1DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              transposed=True,
                                              dims=1,
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        """Run the transposed convolution on ``x``; ``output_size``
        optionally fixes the output length."""
        return F.conv1d_transpose(x,
                                  self.weight,
                                  bias=self.bias,
                                  output_size=output_size,
                                  output_padding=self.output_padding,
                                  padding=self._padding,
                                  stride=self._stride,
                                  dilation=self._dilation,
                                  groups=self._groups,
                                  data_format=self._data_format)
class Conv2D(_ConvNd):
    """2D convolution layer over NCHW / NHWC inputs.

    Configures the shared ``_ConvNd`` base for a non-transposed,
    two-dimensional convolution; ``forward`` dispatches to the op type
    and cuDNN decision computed in ``_ConvNd.__init__``.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        # Positional literals: transposed=False, dims=2.
        super(Conv2D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,
                                     2,
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)
    def forward(self, x):
        """Run the convolution on ``x``."""
        if self._padding_mode != 'zeros':
            # Non-zero padding modes pre-pad the input; the op then uses
            # the zeroed-out padding stored in self._updated_padding.
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
        out = F.conv._conv_nd(x,
                              self.weight,
                              bias=self.bias,
                              stride=self._stride,
                              padding=self._updated_padding,
                              padding_algorithm=self._padding_algorithm,
                              dilation=self._dilation,
                              groups=self._groups,
                              data_format=self._data_format,
                              channel_dim=self._channel_dim,
                              op_type=self._op_type,
                              use_cudnn=self._use_cudnn)
        return out
class Conv2DTranspose(_ConvNd):
    """2D transposed convolution layer.

    Thin wrapper that configures the shared ``_ConvNd`` base for a
    transposed, two-dimensional convolution and dispatches the actual
    computation to ``F.conv2d_transpose``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(Conv2DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              transposed=True,
                                              dims=2,
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        """Run the transposed convolution on ``x``.

        An explicit ``output_size`` takes precedence over the stored
        ``output_padding`` (they are mutually exclusive ways of fixing
        the output shape).
        """
        effective_output_padding = (self.output_padding
                                    if output_size is None else 0)
        return F.conv2d_transpose(x,
                                  self.weight,
                                  bias=self.bias,
                                  padding=self._padding,
                                  output_padding=effective_output_padding,
                                  stride=self._stride,
                                  dilation=self._dilation,
                                  groups=self._groups,
                                  output_size=output_size,
                                  data_format=self._data_format)
class Conv3D(_ConvNd):
    """3D convolution layer over NCDHW / NDHWC inputs.

    Configures the shared ``_ConvNd`` base for a non-transposed,
    three-dimensional convolution; ``forward`` dispatches to the op type
    and cuDNN decision computed in ``_ConvNd.__init__``.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        # Positional literals: transposed=False, dims=3.
        super(Conv3D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,
                                     3,
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)
    def forward(self, x):
        """Run the convolution on ``x``."""
        if self._padding_mode != 'zeros':
            # Non-zero padding modes pre-pad the input; the op then uses
            # the zeroed-out padding stored in self._updated_padding.
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
        out = F.conv._conv_nd(x,
                              self.weight,
                              bias=self.bias,
                              stride=self._stride,
                              padding=self._updated_padding,
                              padding_algorithm=self._padding_algorithm,
                              dilation=self._dilation,
                              groups=self._groups,
                              data_format=self._data_format,
                              channel_dim=self._channel_dim,
                              op_type=self._op_type,
                              use_cudnn=self._use_cudnn)
        return out
class Conv3DTranspose(_ConvNd):
    """3D transposed convolution layer.

    Thin wrapper that configures the shared ``_ConvNd`` base for a
    transposed, three-dimensional convolution and dispatches the actual
    computation to ``F.conv3d_transpose``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        super(Conv3DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              transposed=True,
                                              dims=3,
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        """Run the transposed convolution on ``x``.

        An explicit ``output_size`` takes precedence over the stored
        ``output_padding`` (they are mutually exclusive ways of fixing
        the output shape).
        """
        effective_output_padding = (self.output_padding
                                    if output_size is None else 0)
        return F.conv3d_transpose(x,
                                  self.weight,
                                  bias=self.bias,
                                  padding=self._padding,
                                  output_padding=effective_output_padding,
                                  stride=self._stride,
                                  dilation=self._dilation,
                                  groups=self._groups,
                                  output_size=output_size,
                                  data_format=self._data_format)
| true | true |
f724fa482a2e003d04725bb8120a96a0f5ea185d | 232 | py | Python | torpedo/dialects/torpedo-vertica/tests/connection_config.py | darKoram/torpedo | fbda29225044e946465bae3782a3071d0d1a2fe3 | [
"MIT"
] | 1 | 2015-02-28T14:42:57.000Z | 2015-02-28T14:42:57.000Z | torpedo/dialects/torpedo-vertica/tests/connection_config.py | darKoram/torpedo | fbda29225044e946465bae3782a3071d0d1a2fe3 | [
"MIT"
] | null | null | null | torpedo/dialects/torpedo-vertica/tests/connection_config.py | darKoram/torpedo | fbda29225044e946465bae3782a3071d0d1a2fe3 | [
"MIT"
] | null | null | null | drivername = 'vertica+pyodbc'
# Placeholder Vertica connection settings used by the dialect tests.
# Replace each value with your environment's details before running.
username = 'your_name'
host = 'your_host_ip_or_hostname'
database = 'your_db_name'
# odbcinst.ini entry [vertica_deploy_test_db]
odbcpath = '/path/to/your/odbc.ini'
datasource = 'your_odbc.ini_section'
| 29 | 45 | 0.780172 | drivername = 'vertica+pyodbc'
# Placeholder Vertica connection settings; replace before running tests.
username = 'your_name'
host = 'your_host_ip_or_hostname'
database = 'your_db_name'
odbcpath = '/path/to/your/odbc.ini'  # path to the odbc.ini file
datasource = 'your_odbc.ini_section'  # DSN section name inside it
| true | true |
f724fa517398283eeaa453c0a6afffa1631cdf46 | 3,846 | py | Python | TA-linode/bin/ta_linode/aob_py3/splunktalib/common/log.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 11 | 2020-01-23T11:32:26.000Z | 2021-09-23T09:24:02.000Z | TA-linode/bin/ta_linode/aob_py3/splunktalib/common/log.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 26 | 2019-07-15T02:38:22.000Z | 2021-12-01T04:14:17.000Z | TA-linode/bin/ta_linode/aob_py3/splunktalib/common/log.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 6 | 2019-07-14T17:44:06.000Z | 2020-11-17T17:33:23.000Z | # SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
"""
Copyright (C) 2005-2019 Splunk Inc. All Rights Reserved.
log utility for TA
"""
import functools
import logging
import logging.handlers as handlers
import os.path as op
import time

from builtins import object

from splunktalib.splunk_platform import make_splunkhome_path
import splunktalib.common.util as cutil
from splunktalib.common.pattern import singleton
logging.Formatter.converter = time.gmtime
def log_enter_exit(logger):
    """
    Return a decorator that logs entry and exit of the wrapped function
    at DEBUG level on ``logger``.

    :param logger: a ``logging.Logger`` the messages are emitted on.
    """

    def log_decorator(func):

        # functools.wraps preserves the wrapped function's __name__,
        # __doc__, etc.; without it every decorated function reported
        # itself as "wrapper" in tracebacks and introspection.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logger.debug("{} entered.".format(func.__name__))
            result = func(*args, **kwargs)
            logger.debug("{} exited.".format(func.__name__))
            return result

        return wrapper

    return log_decorator
@singleton
class Logs(object):
    """Process-wide registry of rotating-file loggers under
    $SPLUNK_HOME/var/log/splunk, keyed by derived log-file name.

    The @singleton decorator makes every caller share one registry.
    """
    def __init__(self, namespace=None, default_level=logging.INFO):
        """
        :param namespace: prefix for log file names; when None it is
            derived from this file's install path (the app name).
        :param default_level: level applied when get_logger gets none.
        """
        self._loggers = {}
        self._default_level = default_level
        if namespace is None:
            namespace = cutil.get_appname_from_path(op.abspath(__file__))
        if namespace:
            namespace = namespace.lower()
        self._namespace = namespace
    def get_logger(self, name, level=None, maxBytes=25000000, backupCount=5):
        """
        Set up a default logger.
        :param name: The log file name.
        :param level: The logging level.
        :param maxBytes: The maximum log file size before rollover.
        :param backupCount: The number of log files to retain.
        """
        if level is None:
            level = self._default_level
        # _get_log_name strips a trailing ".py" and adds the namespace
        # prefix plus ".log" suffix; the result is also the cache key.
        name = self._get_log_name(name)
        if name in self._loggers:
            return self._loggers[name]
        logfile = make_splunkhome_path(["var", "log", "splunk", name])
        logger = logging.getLogger(name)
        # Guard against attaching a second handler for the same file when
        # logging.getLogger returned a logger configured elsewhere.
        # NOTE(review): assumes every existing handler has baseFilename
        # (i.e. is file-based) -- a StreamHandler here would raise.
        handler_exists = any(
            [True for h in logger.handlers if h.baseFilename == logfile]
        )
        if not handler_exists:
            file_handler = handlers.RotatingFileHandler(
                logfile, mode="a", maxBytes=maxBytes, backupCount=backupCount
            )
            formatter = logging.Formatter(
                "%(asctime)s +0000 log_level=%(levelname)s, pid=%(process)d, tid=%(threadName)s, "
                "file=%(filename)s, func_name=%(funcName)s, code_line_no=%(lineno)d | %(message)s"
            )
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
            logger.setLevel(level)
            logger.propagate = False
        self._loggers[name] = logger
        return logger
    def set_level(self, level, name=None):
        """
        Change the log level of the logging
        :param level: the level of the logging to be setLevel
        :param name: the name of the logging to set, in case it is not set,
               all the loggers will be affected
        """
        if name is not None:
            name = self._get_log_name(name)
            logger = self._loggers.get(name)
            if logger is not None:
                logger.setLevel(level)
        else:
            # No name: update the default and every cached logger.
            self._default_level = level
            for logger in self._loggers.values():
                logger.setLevel(level)
    def _get_log_name(self, name):
        """Map a bare/script name to "<namespace>_<name>.log"."""
        if name.endswith(".py"):
            name = name.replace(".py", "")
        if self._namespace:
            name = "{}_{}.log".format(self._namespace, name)
        else:
            name = "{}.log".format(name)
        return name
# Module-level default logger shared by importers of this module.
logger = Logs().get_logger("util")
def reset_logger(name):
    """
    Reset global logger.

    :param name: new log file base name, passed to Logs.get_logger.
    """
    global logger
    logger = Logs().get_logger(name)
from builtins import object
import logging
import logging.handlers as handlers
import os.path as op
from splunktalib.splunk_platform import make_splunkhome_path
import splunktalib.common.util as cutil
from splunktalib.common.pattern import singleton
import time
logging.Formatter.converter = time.gmtime
def log_enter_exit(logger):
    """Return a decorator that logs entry and exit of the wrapped
    function at DEBUG level on ``logger``."""
    def log_decorator(func):
        def wrapper(*args, **kwargs):
            logger.debug("{} entered.".format(func.__name__))
            result = func(*args, **kwargs)
            logger.debug("{} exited.".format(func.__name__))
            return result
        return wrapper
    return log_decorator
@singleton
class Logs(object):
    """Process-wide registry of rotating-file loggers under
    $SPLUNK_HOME/var/log/splunk, keyed by derived log-file name."""
    def __init__(self, namespace=None, default_level=logging.INFO):
        """namespace prefixes log file names; None derives it from this
        file's install path via cutil.get_appname_from_path."""
        self._loggers = {}
        self._default_level = default_level
        if namespace is None:
            namespace = cutil.get_appname_from_path(op.abspath(__file__))
        if namespace:
            namespace = namespace.lower()
        self._namespace = namespace
    def get_logger(self, name, level=None, maxBytes=25000000, backupCount=5):
        """Return (and cache) a rotating-file logger for ``name``."""
        if level is None:
            level = self._default_level
        name = self._get_log_name(name)
        if name in self._loggers:
            return self._loggers[name]
        logfile = make_splunkhome_path(["var", "log", "splunk", name])
        logger = logging.getLogger(name)
        # Avoid attaching a duplicate handler for the same file.
        # NOTE(review): assumes existing handlers are file-based
        # (have baseFilename) -- confirm.
        handler_exists = any(
            [True for h in logger.handlers if h.baseFilename == logfile]
        )
        if not handler_exists:
            file_handler = handlers.RotatingFileHandler(
                logfile, mode="a", maxBytes=maxBytes, backupCount=backupCount
            )
            formatter = logging.Formatter(
                "%(asctime)s +0000 log_level=%(levelname)s, pid=%(process)d, tid=%(threadName)s, "
                "file=%(filename)s, func_name=%(funcName)s, code_line_no=%(lineno)d | %(message)s"
            )
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
            logger.setLevel(level)
            logger.propagate = False
        self._loggers[name] = logger
        return logger
    def set_level(self, level, name=None):
        """Change the level of one cached logger, or of the default and
        all cached loggers when ``name`` is None."""
        if name is not None:
            name = self._get_log_name(name)
            logger = self._loggers.get(name)
            if logger is not None:
                logger.setLevel(level)
        else:
            self._default_level = level
            for logger in self._loggers.values():
                logger.setLevel(level)
    def _get_log_name(self, name):
        """Map a bare/script name to "<namespace>_<name>.log"."""
        if name.endswith(".py"):
            name = name.replace(".py", "")
        if self._namespace:
            name = "{}_{}.log".format(self._namespace, name)
        else:
            name = "{}.log".format(name)
        return name
# Module-level default logger shared by importers of this module.
logger = Logs().get_logger("util")
def reset_logger(name):
    """Rebind the module-level ``logger`` to the log file ``name``."""
    global logger
    logger = Logs().get_logger(name)
| true | true |
f724fb20745e78bebd0c20e4126667f97df1a297 | 1,882 | py | Python | toontown/chat/ToonChatGarbler.py | philicheese2003/ToontownProjectAltisServer | cfa225d1bdddacdbd29b621382347fce17e1dc66 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/chat/ToonChatGarbler.py | kool601/Project-Altis-Educational-Source | 0a74999fb52d4e690a41b984703119f63c372d20 | [
"Apache-2.0"
] | null | null | null | toontown/chat/ToonChatGarbler.py | kool601/Project-Altis-Educational-Source | 0a74999fb52d4e690a41b984703119f63c372d20 | [
"Apache-2.0"
] | 2 | 2017-12-20T17:46:56.000Z | 2021-06-25T02:56:36.000Z | import string
import random
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from otp.chat import ChatGarbler
class ToonChatGarbler(ChatGarbler.ChatGarbler):
    """Garbles chat into animal noises based on the toon's species."""

    # Maps a toon's animal type to its list of nonsense "words";
    # 'default' is the fallback for unknown species.
    animalSounds = {
        'dog': TTLocalizer.ChatGarblerDog,
        'cat': TTLocalizer.ChatGarblerCat,
        'mouse': TTLocalizer.ChatGarblerMouse,
        'horse': TTLocalizer.ChatGarblerHorse,
        'rabbit': TTLocalizer.ChatGarblerRabbit,
        'duck': TTLocalizer.ChatGarblerDuck,
        'monkey': TTLocalizer.ChatGarblerMonkey,
        'bear': TTLocalizer.ChatGarblerBear,
        'pig': TTLocalizer.ChatGarblerPig,
        'deer': TTLocalizer.ChatGarblerDeer,
        'default': OTPLocalizer.ChatGarblerDefault}

    def garble(self, toon, message):
        """Return 1-7 random animal words for toon (message is ignored)."""
        return self._garbleWords(toon, random.randint(1, 7))

    def garbleSingle(self, toon, message):
        """Return a single random animal word for toon (message ignored)."""
        # Previously duplicated garble()'s loop with numWords hardcoded
        # to 1; both now share _garbleWords.
        return self._garbleWords(toon, 1)

    def _garbleWords(self, toon, numWords):
        """Build a space-separated string of numWords random sounds
        drawn from the toon's species word list."""
        animalType = toon.getStyle().getType()
        wordlist = ToonChatGarbler.animalSounds.get(
            animalType, ToonChatGarbler.animalSounds['default'])
        # range (not the Python-2-only xrange) keeps this portable;
        # random.choice replaces the manual randint indexing.
        words = [random.choice(wordlist) for _ in range(numWords)]
        return ' '.join(words)
| 36.901961 | 63 | 0.652497 | import string
import random
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from otp.chat import ChatGarbler
class ToonChatGarbler(ChatGarbler.ChatGarbler):
    """Garbles chat into animal noises based on the toon's species.

    NOTE(review): uses xrange, so this module targets Python 2.
    """
    # Maps animal type -> list of nonsense "words"; 'default' is the
    # fallback for unknown species.
    animalSounds = {'dog': TTLocalizer.ChatGarblerDog,
     'cat': TTLocalizer.ChatGarblerCat,
     'mouse': TTLocalizer.ChatGarblerMouse,
     'horse': TTLocalizer.ChatGarblerHorse,
     'rabbit': TTLocalizer.ChatGarblerRabbit,
     'duck': TTLocalizer.ChatGarblerDuck,
     'monkey': TTLocalizer.ChatGarblerMonkey,
     'bear': TTLocalizer.ChatGarblerBear,
     'pig': TTLocalizer.ChatGarblerPig,
     'deer': TTLocalizer.ChatGarblerDeer,
     'default': OTPLocalizer.ChatGarblerDefault}
    def garble(self, toon, message):
        """Return 1-7 random space-separated animal words for toon;
        the original message content is ignored."""
        newMessage = ''
        animalType = toon.getStyle().getType()
        if animalType in ToonChatGarbler.animalSounds:
            wordlist = ToonChatGarbler.animalSounds[animalType]
        else:
            wordlist = ToonChatGarbler.animalSounds['default']
        numWords = random.randint(1, 7)
        for i in xrange(1, numWords + 1):
            wordIndex = random.randint(0, len(wordlist) - 1)
            newMessage = newMessage + wordlist[wordIndex]
            # Separate words with a space, but no trailing space.
            if i < numWords:
                newMessage = newMessage + ' '
        return newMessage
    def garbleSingle(self, toon, message):
        """Return a single random animal word for toon; same logic as
        garble() with numWords fixed to 1 (message ignored)."""
        newMessage = ''
        animalType = toon.getStyle().getType()
        if animalType in ToonChatGarbler.animalSounds:
            wordlist = ToonChatGarbler.animalSounds[animalType]
        else:
            wordlist = ToonChatGarbler.animalSounds['default']
        numWords = 1
        for i in xrange(1, numWords + 1):
            wordIndex = random.randint(0, len(wordlist) - 1)
            newMessage = newMessage + wordlist[wordIndex]
            if i < numWords:
                newMessage = newMessage + ' '
        return newMessage
f724fb8f9f9d4d1e3c0793409f6c05445f76ed63 | 5,652 | py | Python | xscale/signal/tests/test_fitting.py | serazing/xscale | a804866aa6f6a5a0f293a7f6765ea17403159134 | [
"Apache-2.0"
] | 24 | 2017-02-28T15:01:29.000Z | 2022-02-22T08:26:23.000Z | xscale/signal/tests/test_fitting.py | serazing/xscale | a804866aa6f6a5a0f293a7f6765ea17403159134 | [
"Apache-2.0"
] | 19 | 2017-02-24T12:30:26.000Z | 2022-02-25T04:57:32.000Z | xscale/signal/tests/test_fitting.py | serazing/xscale | a804866aa6f6a5a0f293a7f6765ea17403159134 | [
"Apache-2.0"
] | 10 | 2017-03-04T02:59:42.000Z | 2021-11-14T12:40:54.000Z | # Python 2/3 compatibility
from __future__ import absolute_import, division, print_function
import xarray as xr
import numpy as np
import pandas as pd
import xscale.signal.fitting as xfit
def test_polyfit():
    """polyfit should recover per-x linear slopes from a noisy trend."""
    Nt, Nx, Ny = 100, 128, 128
    rand = xr.DataArray(np.random.rand(Nt, Nx, Ny), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * rand.x / Nx), dims=['x'])
    truth = rand + slopes * rand.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    linfit = xfit.polyfit(truth, dim='time').load()
    # Smoke-test the Dataset code path as well (result unchecked).
    xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
    assert np.allclose(linfit.sel(degree=1).mean(dim='y').data, slopes.data,
                       rtol=5e-2, atol=1e-3)
def test_linreg():
    """linreg should recover both slope and intercept of a clean trend."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    # Smoke-test the Dataset polyfit code path (result unchecked).
    xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
    slopes_fitted, offsets_fitted = xfit.linreg(truth, dim='time')
    assert np.allclose(slopes, slopes_fitted.mean(dim='y').load())
    assert np.allclose(offset, offsets_fitted.mean(dim='y').load())
def test_trend():
    """trend should reproduce a constant field and a clean linear trend."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    trend_mean = xfit.trend(offset, dim='time', type='constant')
    trend_linear = xfit.trend(truth, dim='time', type='linear')
    assert np.allclose(offset, trend_mean.load())
    assert np.allclose(truth, trend_linear.load())
def test_detrend():
    """detrend should return zeros for purely constant/linear signals."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    assert np.allclose(0 * offset, xfit.detrend(offset, dim='time',
                                                type='constant').load())
    assert np.allclose(0 * offset, xfit.detrend(truth, dim='time',
                                                type='linear').load())
def test_sinfit():
    """sinfit should recover amplitude/phase/offset of known sinusoids."""
    Nt, Nx, Ny = 100, 128, 128
    zeros = xr.DataArray(np.zeros((Nt, Nx, Ny)), dims=['time', 'x', 'y'])
    zeros = zeros.assign_coords(time=pd.date_range(start='2011-01-01',
                                                   periods=100, freq='H'))
    offset = 0.4
    amp1, phi1 = 1.2, 0.
    wave1 = amp1 * np.sin(2 * np.pi * zeros['time.hour'] / 24. +
                          phi1 * np.pi / 180.)
    amp2, phi2 = 1.9, 60.
    wave2 = amp2 * np.sin(2 * np.pi * zeros['time.hour'] / 12. +
                          phi2 * np.pi / 180.)
    truth = offset + zeros + wave1 + wave2
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    # Fit both waves
    fit2w = xfit.sinfit(truth, dim='time', periods=[24, 12], unit='h').load()
    assert np.isclose(fit2w['amplitude'].sel(periods=24).isel(x=10, y=10), amp1)
    assert np.isclose(fit2w['phase'].sel(periods=24).isel(x=10, y=10), phi1,
                      atol=1e-4)
    assert np.isclose(fit2w['amplitude'].sel(periods=12).isel(x=10, y=10), amp2)
    assert np.isclose(fit2w['phase'].sel(periods=12).isel(x=10, y=10), phi2)
    assert np.isclose(fit2w['offset'].isel(x=10, y=10), offset)
    # Fit only one wave (wave2)
    fit1w = xfit.sinfit(truth, dim='time', periods=12, unit='h').load()
    # Compare with 5% relative tolerance (error induced by wave1)
    assert np.isclose(fit1w['amplitude'].sel(periods=12).isel(x=10, y=10),
                      amp2, rtol=5e-2)
    assert np.isclose(fit1w['phase'].sel(periods=12).isel(x=10, y=10),
                      phi2, rtol=5e-2)
    # Fit one-dimensional data (smoke test only; result unchecked)
    xfit.sinfit(truth.isel(x=0, y=0), dim='time',
                periods=[24, 12],
                unit='h').load()
def test_sinval():
    """sinval should evaluate a sinusoidal-fit Dataset on a time axis
    (smoke test: calls are exercised, output is unchecked)."""
    Nt, Nx, Ny = 100, 128, 128
    offset = 0.4
    periods = [24., 12.]
    amp1, phi1 = 1.2, 0.
    amp2, phi2 = 1.9, 60.
    time = xr.DataArray(pd.date_range(start='2011-01-01',
                                      periods=Nt,
                                      freq='H'),
                        dims='time')
    amp = xr.DataArray([amp1, amp2], dims='periods')
    phi = xr.DataArray([phi1, phi2], dims='periods')
    ones = xr.DataArray(np.ones((Nx, Ny)), dims=['x', 'y'])
    var_dict = {'amplitude': amp * ones,
                'phase': phi * ones,
                'offset': offset * ones}
    ds = xr.Dataset(var_dict).chunk(chunks={'x': 50, 'y': 50})
    ds = ds.assign_coords(periods=periods)
    ds['periods'].attrs['units'] = 'h'
    xfit.sinval(ds, time)
    # One-mode reconstruction
    xfit.sinval(ds.sel(periods=[24,]), time)
def test_order_and_stack():
    """_order_and_stack should put the target dim first and stack the
    remaining dims into 'temp_dim'."""
    rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
    rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    rand_stacked = xfit._order_and_stack(rand, 'y')
    # Compare dimension names with '==': 'is' only worked via CPython
    # string interning and raises SyntaxWarning on Python >= 3.8.
    assert rand_stacked.dims[0] == 'y'
    assert rand_stacked.dims[-1] == 'temp_dim'
    assert rand_stacked.shape[-1] == 128 * 100
    # Test the exception for 1d array: it should pass through unchanged.
    rand1d = rand.isel(time=0, x=0)
    rand1d_stacked = xfit._order_and_stack(rand1d, 'y')
    assert np.array_equal(rand1d_stacked, rand1d)
def test_unstack():
    """_unstack should restore the dims removed by _order_and_stack."""
    rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
    rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    rand_stacked = xfit._order_and_stack(rand, 'y')
    rand_unstacked = xfit._unstack(rand_stacked.mean(dim='y'))
    assert rand_unstacked.dims == ('time', 'x')
    assert rand_unstacked.shape == (100, 128)
from __future__ import absolute_import, division, print_function
import xarray as xr
import numpy as np
import pandas as pd
import xscale.signal.fitting as xfit
def test_polyfit():
    """polyfit should recover per-x linear slopes from a noisy trend."""
    Nt, Nx, Ny = 100, 128, 128
    rand = xr.DataArray(np.random.rand(Nt, Nx, Ny), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * rand.x / Nx), dims=['x'])
    truth = rand + slopes * rand.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    linfit = xfit.polyfit(truth, dim='time').load()
    # Smoke-test the Dataset code path as well (result unchecked).
    xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
    assert np.allclose(linfit.sel(degree=1).mean(dim='y').data, slopes.data,
                       rtol=5e-2, atol=1e-3)
def test_linreg():
    """linreg should recover both slope and intercept of a clean trend."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    # Smoke-test the Dataset polyfit code path (result unchecked).
    xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
    slopes_fitted, offsets_fitted = xfit.linreg(truth, dim='time')
    assert np.allclose(slopes, slopes_fitted.mean(dim='y').load())
    assert np.allclose(offset, offsets_fitted.mean(dim='y').load())
def test_trend():
    """trend should reproduce a constant field and a clean linear trend."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    trend_mean = xfit.trend(offset, dim='time', type='constant')
    trend_linear = xfit.trend(truth, dim='time', type='linear')
    assert np.allclose(offset, trend_mean.load())
    assert np.allclose(truth, trend_linear.load())
def test_detrend():
    """detrend should return zeros for purely constant/linear signals."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    assert np.allclose(0 * offset, xfit.detrend(offset, dim='time',
                                                type='constant').load())
    assert np.allclose(0 * offset, xfit.detrend(truth, dim='time',
                                                type='linear').load())
def test_sinfit():
    """sinfit should recover amplitude/phase/offset of known sinusoids."""
    Nt, Nx, Ny = 100, 128, 128
    zeros = xr.DataArray(np.zeros((Nt, Nx, Ny)), dims=['time', 'x', 'y'])
    zeros = zeros.assign_coords(time=pd.date_range(start='2011-01-01',
                                                   periods=100, freq='H'))
    offset = 0.4
    amp1, phi1 = 1.2, 0.
    wave1 = amp1 * np.sin(2 * np.pi * zeros['time.hour'] / 24. +
                          phi1 * np.pi / 180.)
    amp2, phi2 = 1.9, 60.
    wave2 = amp2 * np.sin(2 * np.pi * zeros['time.hour'] / 12. +
                          phi2 * np.pi / 180.)
    truth = offset + zeros + wave1 + wave2
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    # Fit both waves at once.
    fit2w = xfit.sinfit(truth, dim='time', periods=[24, 12], unit='h').load()
    assert np.isclose(fit2w['amplitude'].sel(periods=24).isel(x=10, y=10), amp1)
    assert np.isclose(fit2w['phase'].sel(periods=24).isel(x=10, y=10), phi1,
                      atol=1e-4)
    assert np.isclose(fit2w['amplitude'].sel(periods=12).isel(x=10, y=10), amp2)
    assert np.isclose(fit2w['phase'].sel(periods=12).isel(x=10, y=10), phi2)
    assert np.isclose(fit2w['offset'].isel(x=10, y=10), offset)
    # Fit only wave2; 5% tolerance absorbs the error induced by wave1.
    fit1w = xfit.sinfit(truth, dim='time', periods=12, unit='h').load()
    assert np.isclose(fit1w['amplitude'].sel(periods=12).isel(x=10, y=10),
                      amp2, rtol=5e-2)
    assert np.isclose(fit1w['phase'].sel(periods=12).isel(x=10, y=10),
                      phi2, rtol=5e-2)
    # One-dimensional data (smoke test only; result unchecked).
    xfit.sinfit(truth.isel(x=0, y=0), dim='time',
                periods=[24, 12],
                unit='h').load()
def test_sinval():
    """sinval should evaluate a sinusoidal-fit Dataset on a time axis
    (smoke test: calls are exercised, output is unchecked)."""
    Nt, Nx, Ny = 100, 128, 128
    offset = 0.4
    periods = [24., 12.]
    amp1, phi1 = 1.2, 0.
    amp2, phi2 = 1.9, 60.
    time = xr.DataArray(pd.date_range(start='2011-01-01',
                                      periods=Nt,
                                      freq='H'),
                        dims='time')
    amp = xr.DataArray([amp1, amp2], dims='periods')
    phi = xr.DataArray([phi1, phi2], dims='periods')
    ones = xr.DataArray(np.ones((Nx, Ny)), dims=['x', 'y'])
    var_dict = {'amplitude': amp * ones,
                'phase': phi * ones,
                'offset': offset * ones}
    ds = xr.Dataset(var_dict).chunk(chunks={'x': 50, 'y': 50})
    ds = ds.assign_coords(periods=periods)
    ds['periods'].attrs['units'] = 'h'
    xfit.sinval(ds, time)
    # One-mode reconstruction.
    xfit.sinval(ds.sel(periods=[24,]), time)
def test_order_and_stack():
    """_order_and_stack should put the target dim first and stack the
    remaining dims into 'temp_dim'."""
    rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
    rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    rand_stacked = xfit._order_and_stack(rand, 'y')
    # Compare dimension names with '==': 'is' only worked via CPython
    # string interning and raises SyntaxWarning on Python >= 3.8.
    assert rand_stacked.dims[0] == 'y'
    assert rand_stacked.dims[-1] == 'temp_dim'
    assert rand_stacked.shape[-1] == 128 * 100
    # 1d input should pass through unchanged.
    rand1d = rand.isel(time=0, x=0)
    rand1d_stacked = xfit._order_and_stack(rand1d, 'y')
    assert np.array_equal(rand1d_stacked, rand1d)
def test_unstack():
    """Round-trip: stacking then unstacking restores the original dims."""
    data = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
    data = data.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    stacked = xfit._order_and_stack(data, 'y')
    # Reduce along y, then unstack the temporary stacked dimension back.
    unstacked = xfit._unstack(stacked.mean(dim='y'))
    assert unstacked.dims == ('time', 'x')
    assert unstacked.shape == (100, 128)
f724fdc85b1773dff69d97659ac96bcf9ba268b2 | 937 | py | Python | sql/hive/src/test/resources/data/scripts/dumpdata_script.py | OlegPt/spark | c79fd911ca85f883c493c5e888f7690868d7b5ea | [
"Apache-2.0"
] | 35,083 | 2015-01-01T03:05:13.000Z | 2022-03-31T21:57:40.000Z | sql/hive/src/test/resources/data/scripts/dumpdata_script.py | OlegPt/spark | c79fd911ca85f883c493c5e888f7690868d7b5ea | [
"Apache-2.0"
] | 32,117 | 2015-01-01T00:00:24.000Z | 2022-03-31T23:54:58.000Z | sql/hive/src/test/resources/data/scripts/dumpdata_script.py | OlegPt/spark | c79fd911ca85f883c493c5e888f7690868d7b5ea | [
"Apache-2.0"
] | 29,687 | 2015-01-01T02:40:43.000Z | 2022-03-31T16:49:33.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
# Emit a deterministic stream of integer row keys for the Hive TRANSFORM
# test: for each of 50 outer passes, the run 20000*i .. 20000*i + 20021 is
# printed 5 times.
# NOTE(review): j is unused, so each inner run repeats verbatim 5 times and
# consecutive passes overlap (20022 > 20000) -- presumably intentional for
# generating duplicate keys; confirm against the consuming test.
for i in range(50):
    for j in range(5):
        for k in range(20022):
            print(20000 * i + k)
# Drain stdin so the upstream writer does not hit a broken pipe before it
# finishes sending its input rows.
for line in sys.stdin:
    pass
| 33.464286 | 61 | 0.736393 |
import sys
# Print the run 20000*i .. 20000*i + 20021 five times for each of 50 outer
# passes (j itself is unused -- the repetition count is the point).
for i in range(50):
    for j in range(5):
        for k in range(20022):
            print(20000 * i + k)
# Consume and discard all of stdin so the upstream writer can finish
# without a broken pipe.
for line in sys.stdin:
    pass
| true | true |
f724feb3c1b587e44e19364e647668548b195782 | 860 | py | Python | grnglow/glow/views/home.py | xiaokai111/green-glow | 19e399c32ee4d3cfc026684b06f49df9b7dd10d5 | [
"MIT"
] | 18 | 2016-03-19T10:57:43.000Z | 2021-10-10T07:52:51.000Z | grnglow/glow/views/home.py | xiaokai111/green-glow | 19e399c32ee4d3cfc026684b06f49df9b7dd10d5 | [
"MIT"
] | 3 | 2019-06-13T03:15:11.000Z | 2020-06-05T18:16:52.000Z | grnglow/glow/views/home.py | xiaokai111/green-glow | 19e399c32ee4d3cfc026684b06f49df9b7dd10d5 | [
"MIT"
] | 11 | 2017-05-15T14:24:17.000Z | 2021-10-10T07:52:56.000Z | # -*- encoding: utf-8 -*-
'''
Created on 2012-3-23
@author: Neil
'''
from django.shortcuts import render_to_response
from grnglow.glow.views import people
from grnglow.glow.models.photo import Photo
def base(request):
    """Render the bare site skeleton template with no page-specific context."""
    return render_to_response('base.html')
def index(request):
    """Site front page.

    Authenticated users are redirected to their personal page; anonymous
    visitors see the top-scored photos arranged into rows of six for the
    template grid.
    """
    if request.user.is_authenticated():
        # people.home(request, user_id) expects user_id as a string.
        return people.home(request, str(request.user.id))
    # Top 12 photos, highest score first.
    photos = Photo.objects.all().order_by('-score')[0:12]
    # Group into rows of six; extend([slice]) in the original was just a
    # verbose append.
    p_items = [photos[i:i + 6] for i in range(0, len(photos), 6)]
    return render_to_response('index.html',
                              {'request': request, 'p_items': p_items})
| 28.666667 | 89 | 0.660465 |
from django.shortcuts import render_to_response
from grnglow.glow.views import people
from grnglow.glow.models.photo import Photo
def base(request):
    """Render the bare site skeleton template with no page-specific context."""
    return render_to_response('base.html')
def index(request):
    """Site front page.

    Authenticated users are redirected to their personal page; anonymous
    visitors see the top-scored photos arranged into rows of six for the
    template grid.
    """
    if request.user.is_authenticated():
        # people.home(request, user_id) expects user_id as a string.
        return people.home(request, str(request.user.id))
    # Top 12 photos, highest score first.
    photos = Photo.objects.all().order_by('-score')[0:12]
    # Group into rows of six; extend([slice]) in the original was just a
    # verbose append.
    p_items = [photos[i:i + 6] for i in range(0, len(photos), 6)]
    return render_to_response('index.html',
                              {'request': request, 'p_items': p_items})
| true | true |
f72500addb9c5aa51a6fb2310b80123201744064 | 5,721 | py | Python | parsers/file_name_validators.py | ddexnet/dsrf | 1dc231ef911e9ee4fbf2fae77ceaef08755f3f7e | [
"Apache-2.0"
] | 34 | 2016-04-28T13:35:50.000Z | 2022-02-21T08:25:21.000Z | parsers/file_name_validators.py | ddexnet/dsrf | 1dc231ef911e9ee4fbf2fae77ceaef08755f3f7e | [
"Apache-2.0"
] | 2 | 2020-02-07T16:37:19.000Z | 2021-01-13T16:57:40.000Z | parsers/file_name_validators.py | ddexnet/dsrf | 1dc231ef911e9ee4fbf2fae77ceaef08755f3f7e | [
"Apache-2.0"
] | 16 | 2016-05-20T12:30:20.000Z | 2022-03-24T13:44:16.000Z | # Lint as: python2, python3
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objects to validate a single file name in a dsrf report."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
from dsrf import constants
from dsrf import error
class FileNameValidator(object):
  """Validates a single DSRF report file name against the expected pattern.

  The expected structure is defined by constants.FILE_NAME_FORMAT; the name
  is split into the components listed in constants.FILE_NAME_COMPONENTS.
  """

  def __init__(self, expected_components):
    # Ordered component names the file name is split into; used as the keys
    # of the dictionary returned by validate_value/split_file_name.
    self.expected_components = expected_components

  def validate_value(self, file_name):
    """Validates that a filename consists of the expected components.

    Args:
      file_name: File name to validate.

    Returns:
      A (file_name_dict, warnings) tuple: file_name_dict maps component
      names to the corresponding file name parts
      (eg. {'ServiceDescription': 'AdSupport'}); warnings is a set of
      error.FileNameValidationWarning instances from the non-fatal checks.

    Raises:
      error.FileNameValidationFailure: If the name structure or any
        mandatory component is invalid.
    """
    warnings = set()
    file_name_dict = self.split_file_name(file_name, self.expected_components)
    try:
      self.validate_xofy(file_name_dict['x'], file_name_dict['y'], file_name)
      self.validate_prefix(file_name_dict['DSR'], file_name)
      self.validate_suffix(file_name_dict['ext'], file_name)
      self.validate_message_notification_period(
          file_name_dict['MessageNotificationPeriod'], file_name)
      self.validate_message_created_datetime(
          file_name_dict['MessageCreatedDateTime'], file_name)
      # The warning-level check runs last so that a non-fatal warning cannot
      # short-circuit the hard validations above (previously a warning here
      # skipped the MessageCreatedDateTime check entirely).
      self.validate_territory_of_use_or_sale(
          file_name_dict['TerritoryOfUseOrSale'], file_name)
    except KeyError:
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    except error.FileNameValidationWarning as e:
      warnings.add(e)
    return file_name_dict, warnings

  @classmethod
  def validate_xofy(cls, x, y, file_name):
    """Validates the 'x of y' shard numbers; returns (x, y) on success."""
    try:
      if int(x) <= int(y):
        return x, y
    except ValueError:
      pass
    raise error.FileNameValidationFailure(
        file_name, 'File number is not an integer or does not exist.')

  @classmethod
  def validate_prefix(cls, prefix, file_name):
    """Validates the fixed file name prefix; returns it on success."""
    if prefix != constants.FILE_NAME_PREFIX:
      raise error.FileNameValidationFailure(
          file_name, 'File name should start with %s.' %
          constants.FILE_NAME_PREFIX)
    return prefix

  @classmethod
  def validate_suffix(cls, suffix, file_name):
    """Validates the file extension against the supported extensions."""
    if suffix not in constants.SUPPORTED_FILE_EXTENSIONS:
      raise error.FileNameValidationFailure(
          file_name, 'Suffix "%s" is not valid, supported suffixes: %s.' % (
              suffix, constants.SUPPORTED_FILE_EXTENSIONS))
    return suffix

  @classmethod
  def validate_message_notification_period(cls, mnp, file_name):
    """Validates the notification period (ISO 8601:2004 period format)."""
    if not constants.MESSAGE_NOTIFICATION_PERIOD_PATTERN.match(mnp):
      raise error.FileNameValidationFailure(
          file_name, 'Message Notification Period "%s" is invalid, should be '
          'ISO 8601:2004 period format.' % mnp)
    return mnp

  @classmethod
  def validate_territory_of_use_or_sale(cls, touos, file_name):
    """TerritoryOfUseOrSale may also be freeform, so this is just a warning."""
    if not constants.TERRITORY_OF_USE_OR_SALE_PATTERN.match(touos):
      raise error.FileNameValidationWarning(
          file_name,
          'It is recommended that the TerritoryOfUseOrSale be set to a '
          'CISAC TIS code or a two-letter ISO code (use "multi" or "worldwide" '
          'for multiple territories). Provided value: "%s"' % touos)
    return touos

  @classmethod
  def validate_message_created_datetime(cls, mcdt, file_name):
    """Validates the MessageCreated-DateTime part (yyyyymmddThhmmss)."""
    if not constants.MESSAGE_CREATED_DATETIME_PATTERN.match(mcdt):
      raise error.FileNameValidationFailure(
          file_name, 'MessageCreated-DateTime "%s" is invalid, should be '
          'yyyyymmddThhmmss.' % mcdt)
    return mcdt

  @classmethod
  def split_file_name(cls, file_name, expected_components):
    """Splits the file name to a dictionary keyed by components names.

    Args:
      file_name: File name to split.
      expected_components: A list of the expected file name parts.

    Returns:
      A dictionary of the file name components names (keys) and the given file
      name parts (values).

    Raises:
      error.FileNameValidationFailure: If the number of parts does not match
        the expected component count.
    """
    basic_split = file_name.split(constants.FILE_NAME_DELIMITER)
    # The 'xofy' part and the '<datetime>.<ext>' part each expand into two
    # components below, hence the "- 2".
    if len(basic_split) != len(constants.FILE_NAME_COMPONENTS) - 2:
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    xofy = basic_split[-2]
    message_created_time_ext = basic_split[-1]
    file_name_parts = basic_split[:-2]
    file_name_parts.extend(xofy.split('of'))
    file_name_parts.extend(message_created_time_ext.split('.', 1))
    if len(file_name_parts) != len(constants.FILE_NAME_COMPONENTS):
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    return {component_name: value for component_name, value in
            zip(expected_components, file_name_parts)}
| 38.918367 | 80 | 0.707394 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
from dsrf import constants
from dsrf import error
class FileNameValidator(object):
  """Validates a single DSRF report file name against the expected pattern."""

  def __init__(self, expected_components):
    # Ordered component names the file name is split into; used as the keys
    # of the dictionary returned by validate_value/split_file_name.
    self.expected_components = expected_components

  def validate_value(self, file_name):
    """Splits file_name into components and validates each of them.

    Returns:
      A (file_name_dict, warnings) tuple: the component-name -> value
      mapping and a set of non-fatal error.FileNameValidationWarning
      instances.

    Raises:
      error.FileNameValidationFailure: On a structurally invalid name or a
        failed mandatory component check.
    """
    warnings = set()
    file_name_dict = self.split_file_name(file_name, self.expected_components)
    try:
      self.validate_xofy(file_name_dict['x'], file_name_dict['y'], file_name)
      self.validate_prefix(file_name_dict['DSR'], file_name,)
      self.validate_suffix(file_name_dict['ext'], file_name)
      self.validate_message_notification_period(
          file_name_dict['MessageNotificationPeriod'], file_name)
      # NOTE(review): a warning raised by the territory check aborts this
      # try block, so the MessageCreatedDateTime check below is skipped in
      # that case -- confirm whether that is intentional.
      self.validate_territory_of_use_or_sale(
          file_name_dict['TerritoryOfUseOrSale'], file_name)
      self.validate_message_created_datetime(
          file_name_dict['MessageCreatedDateTime'], file_name)
    except KeyError:
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    except error.FileNameValidationWarning as e:
      warnings.add(e)
    return file_name_dict, warnings

  @classmethod
  def validate_xofy(cls, x, y, file_name):
    """Validates the 'x of y' shard numbers; returns (x, y) on success."""
    try:
      if int(x) <= int(y):
        return x, y
    except ValueError:
      pass
    raise error.FileNameValidationFailure(
        file_name, 'File number is not an integer or does not exist.')

  @classmethod
  def validate_prefix(cls, prefix, file_name):
    """Validates the fixed file name prefix; returns it on success."""
    if prefix != constants.FILE_NAME_PREFIX:
      raise error.FileNameValidationFailure(
          file_name, 'File name should start with %s.' %
          constants.FILE_NAME_PREFIX)
    return prefix

  @classmethod
  def validate_suffix(cls, suffix, file_name):
    """Validates the file extension against the supported extensions."""
    if suffix not in constants.SUPPORTED_FILE_EXTENSIONS:
      raise error.FileNameValidationFailure(
          file_name, 'Suffix "%s" is not valid, supported suffixes: %s.' % (
              suffix, constants.SUPPORTED_FILE_EXTENSIONS))
    return suffix

  @classmethod
  def validate_message_notification_period(cls, mnp, file_name):
    """Validates the notification period (ISO 8601:2004 period format)."""
    if not constants.MESSAGE_NOTIFICATION_PERIOD_PATTERN.match(mnp):
      raise error.FileNameValidationFailure(
          file_name, 'Message Notification Period "%s" is invalid, should be '
          'ISO 8601:2004 period format.' % mnp)
    return mnp

  @classmethod
  def validate_territory_of_use_or_sale(cls, touos, file_name):
    """Checks the territory component; raises only a non-fatal warning."""
    if not constants.TERRITORY_OF_USE_OR_SALE_PATTERN.match(touos):
      raise error.FileNameValidationWarning(
          file_name,
          'It is recommended that the TerritoryOfUseOrSale be set to a '
          'CISAC TIS code or a two-letter ISO code (use "multi" or "worldwide" '
          'for multiple territories). Provided value: "%s"' % touos)
    return touos

  @classmethod
  def validate_message_created_datetime(cls, mcdt, file_name):
    """Validates the MessageCreated-DateTime part (yyyyymmddThhmmss)."""
    if not constants.MESSAGE_CREATED_DATETIME_PATTERN.match(mcdt):
      raise error.FileNameValidationFailure(
          file_name, 'MessageCreated-DateTime "%s" is invalid, should be '
          'yyyyymmddThhmmss.' % mcdt)
    return mcdt

  @classmethod
  def split_file_name(cls, file_name, expected_components):
    """Splits the file name into a dict keyed by component names.

    The 'xofy' part and the '<datetime>.<ext>' part each expand into two
    components, which is why the first length check uses "- 2".

    Raises:
      error.FileNameValidationFailure: If the part count does not match the
        expected component count.
    """
    basic_split = file_name.split(constants.FILE_NAME_DELIMITER)
    if len(basic_split) != len(constants.FILE_NAME_COMPONENTS) - 2:
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    xofy = basic_split[-2]
    message_created_time_ext = basic_split[-1]
    file_name_parts = basic_split[:-2]
    xofy = xofy.split('of')
    message_created_time_ext = message_created_time_ext.split('.', 1)
    file_name_parts.extend(xofy)
    file_name_parts.extend(message_created_time_ext)
    if len(file_name_parts) != len(constants.FILE_NAME_COMPONENTS):
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    file_name_dict = {component_name: value for component_name, value in
                      zip(expected_components, file_name_parts)}
    return file_name_dict
| true | true |
f725013d099dbb0b8a35ade5b1cc606b7b8eb889 | 3,373 | py | Python | webcams/eye_status.py | OlegBezverhii/python-notebooks | 5d4b501173a2f3519bff9a085c3d2190ce6cf808 | [
"MIT"
] | null | null | null | webcams/eye_status.py | OlegBezverhii/python-notebooks | 5d4b501173a2f3519bff9a085c3d2190ce6cf808 | [
"MIT"
] | null | null | null | webcams/eye_status.py | OlegBezverhii/python-notebooks | 5d4b501173a2f3519bff9a085c3d2190ce6cf808 | [
"MIT"
] | null | null | null | import os
from PIL import Image
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from imageio import imread, imwrite
from skimage.transform import resize
IMG_SIZE = 24
def collect():
    """Build the training and validation image generators.

    Both read grayscale eye crops from dataset/train and dataset/val,
    rescale pixel values to [0, 1], and apply shear and horizontal-flip
    augmentation.

    Returns:
        (train_generator, val_generator) directory iterators yielding
        binary-labelled batches of 32.
    """
    augmentation = dict(rescale=1. / 255, shear_range=0.2,
                        horizontal_flip=True)
    flow_options = dict(target_size=(IMG_SIZE, IMG_SIZE),
                        color_mode="grayscale",
                        batch_size=32,
                        class_mode="binary",
                        shuffle=True,
                        seed=42)
    train_generator = ImageDataGenerator(**augmentation).flow_from_directory(
        directory="dataset/train", **flow_options)
    val_generator = ImageDataGenerator(**augmentation).flow_from_directory(
        directory="dataset/val", **flow_options)
    return train_generator, val_generator
def save_model(model):
    """Persist the model: architecture to model.json, weights to model.h5."""
    with open("model.json", "w") as json_file:
        json_file.write(model.to_json())
    # Weights go to HDF5 alongside the JSON architecture file.
    model.save_weights("model.h5")
def load_model():
    """Rebuild the saved model from model.json/model.h5 and compile it."""
    json_file = open('model.json', 'r')
    architecture = json_file.read()
    json_file.close()
    model = model_from_json(architecture)
    # Restore the trained weights into the reconstructed architecture.
    model.load_weights("model.h5")
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def train(train_generator, val_generator):
    """Build, train and save the LeNet-style eye-state classifier.

    Args:
        train_generator: directory iterator yielding training batches
            (as returned by collect()).
        val_generator: directory iterator yielding validation batches.
    """
    # One epoch is a full pass over each directory iterator.
    steps_per_epoch = train_generator.n // train_generator.batch_size
    validation_steps = val_generator.n // val_generator.batch_size
    print('[LOG] Initialize Neural Network')  # fixed "Intialize" typo
    # Two conv/avg-pool stages followed by dense layers; a single sigmoid
    # unit for the binary open/closed decision.
    model = Sequential()
    model.add(Conv2D(filters=6, kernel_size=(3, 3), activation='relu',
                     input_shape=(IMG_SIZE, IMG_SIZE, 1)))
    model.add(AveragePooling2D())
    model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
    model.add(AveragePooling2D())
    model.add(Flatten())
    model.add(Dense(units=120, activation='relu'))
    model.add(Dense(units=84, activation='relu'))
    model.add(Dense(units=1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    # Model.fit accepts generators directly; fit_generator is deprecated
    # since TF 2.1 and removed in later releases.
    model.fit(train_generator,
              steps_per_epoch=steps_per_epoch,
              validation_data=val_generator,
              validation_steps=validation_steps,
              epochs=20)
    save_model(model)
def predict(img, model):
    """Classify an RGB eye crop as 'open', 'closed' or 'idk' (uncertain).

    Args:
        img: RGB image array of the eye region.
        model: compiled keras model (see load_model()/train()).

    Returns:
        'closed' if the score is below 0.1, 'open' if above 0.9,
        otherwise 'idk'.
    """
    # Convert to grayscale, then to an ndarray: skimage.transform.resize
    # expects array-like input, not a PIL Image. (Leftover debug print()
    # calls from the original were removed.)
    gray = np.asarray(Image.fromarray(img, 'RGB').convert('L'))
    # NOTE(review): resize() on uint8 input already returns floats in
    # [0, 1]; the extra /255 is kept to match the scaling the model's
    # weights were trained against -- confirm before changing.
    scaled = resize(gray, (IMG_SIZE, IMG_SIZE)).astype('float32') / 255
    # Add batch and channel axes: (1, H, W, 1).
    batch = scaled.reshape(1, IMG_SIZE, IMG_SIZE, 1)
    prediction = model.predict(batch)
    if prediction < 0.1:
        prediction = 'closed'
    elif prediction > 0.9:
        prediction = 'open'
    else:
        prediction = 'idk'
    return prediction
def evaluate(X_test, y_test):
    """Load the saved model and print its test accuracy as a percentage."""
    model = load_model()
    print('Evaluate model')
    _, accuracy = model.evaluate(X_test, y_test, verbose=0)
    print(accuracy * 100)
if __name__ == '__main__':
    # Script entry point: build the data generators, then train and save
    # the eye-state model.
    train_generator , val_generator = collect()
    train(train_generator,val_generator)
| 26.769841 | 103 | 0.731693 | import os
from PIL import Image
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from imageio import imread, imwrite
from skimage.transform import resize
IMG_SIZE = 24
def collect():
    """Create the training and validation image generators.

    Both read grayscale eye images from dataset/train and dataset/val,
    rescale pixel values to [0, 1], and apply shear and horizontal-flip
    augmentation.

    Returns:
        (train_generator, val_generator) directory iterators yielding
        binary-labelled batches of 32.
    """
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        horizontal_flip=True,
    )
    val_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        horizontal_flip=True, )
    train_generator = train_datagen.flow_from_directory(
        directory="dataset/train",
        target_size=(IMG_SIZE, IMG_SIZE),
        color_mode="grayscale",
        batch_size=32,
        class_mode="binary",
        shuffle=True,
        seed=42
    )
    val_generator = val_datagen.flow_from_directory(
        directory="dataset/val",
        target_size=(IMG_SIZE, IMG_SIZE),
        color_mode="grayscale",
        batch_size=32,
        class_mode="binary",
        shuffle=True,
        seed=42
    )
    return train_generator, val_generator
def save_model(model):
    """Persist the model: architecture to model.json, weights to model.h5."""
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # Serialize the learned weights to HDF5.
    model.save_weights("model.h5")
def load_model():
    """Rebuild the saved model from model.json/model.h5 and compile it."""
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # Restore the trained weights into the reconstructed architecture.
    loaded_model.load_weights("model.h5")
    loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return loaded_model
def train(train_generator, val_generator):
    """Build the LeNet-style CNN, train it on the generators, and save it."""
    # One epoch is a full pass over each directory iterator.
    STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
    STEP_SIZE_VALID=val_generator.n//val_generator.batch_size
    print('[LOG] Intialize Neural Network')
    # Two conv/avg-pool stages, then dense layers; a single sigmoid unit
    # for the binary open/closed decision.
    model = Sequential()
    model.add(Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE,IMG_SIZE,1)))
    model.add(AveragePooling2D())
    model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
    model.add(AveragePooling2D())
    model.add(Flatten())
    model.add(Dense(units=120, activation='relu'))
    model.add(Dense(units=84, activation='relu'))
    model.add(Dense(units=1, activation = 'sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # NOTE(review): fit_generator is deprecated in TF2; Model.fit accepts
    # generators directly.
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=val_generator,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=20
                        )
    save_model(model)
def predict(img, model):
    """Score an RGB eye crop and map it to 'open'/'closed'/'idk'.

    Args:
        img: RGB image array of the eye region.
        model: compiled keras model (see load_model()/train()).

    Returns:
        'closed' (score < 0.1), 'open' (score > 0.9), otherwise 'idk'.
    """
    # Convert to a grayscale PIL image.
    img = Image.fromarray(img, 'RGB').convert('L')
    print(img)  # NOTE(review): leftover debug output -- consider removing.
    # NOTE(review): resize() receives a PIL Image (not an ndarray), and the
    # extra /255 assumes a 0-255 value range -- verify this matches how the
    # training inputs were scaled.
    img = resize(img, (IMG_SIZE,IMG_SIZE)).astype('float32')/255
    print(img)  # NOTE(review): leftover debug output.
    # Add batch and channel axes: (1, H, W, 1).
    img = img.reshape(1,IMG_SIZE,IMG_SIZE,1)
    prediction = model.predict(img)
    if prediction < 0.1:
        prediction = 'closed'
    elif prediction > 0.9:
        prediction = 'open'
    else:
        prediction = 'idk'
    return prediction
def evaluate(X_test, y_test):
    """Load the saved model and print its test accuracy as a percentage."""
    model = load_model()
    print('Evaluate model')
    loss, acc = model.evaluate(X_test, y_test, verbose = 0)
    print(acc * 100)
if __name__ == '__main__':
    # Script entry point: build the data generators, then train and save
    # the eye-state model.
    train_generator , val_generator = collect()
    train(train_generator,val_generator)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.