Dataset schema (one row per source file; ranges are min to max observed values):

| column | type | length / range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] (nullable) | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] (nullable) | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
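
The records below follow this schema, one per source file. As a rough sketch (the dataset repository id below is a placeholder, not a specific release), rows of this shape can be streamed and filtered with the Hugging Face `datasets` library:

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the actual dataset repository.
ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)

# Keep non-vendored, non-generated Python files smaller than 1 MB.
python_rows = (
    row
    for row in ds
    if row["language"] == "Python"
    and not row["is_vendor"]
    and not row["is_generated"]
    and row["length_bytes"] < 1_000_000
)

# Inspect the first matching row's provenance fields and content size.
first = next(python_rows)
print(first["repo_name"], first["path"], first["length_bytes"])
```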

blob_id: 2ac2503dd397aaf8de2fa020b3fa84696e860a31 | directory_id: 0e4860fecfdd34a3255003cc8c8df086c14083dd | content_id: 626cfdba50f5bf5833728ffc383f55d83fb59e5e
repo_name: anzhihe/learning | branch_name: refs/heads/master | snapshot_id: 503ab9a58f280227011da5eaa4b14b46c678e6f3 | revision_id: 66f7f801e1395207778484e1543ea26309d4b354
path: /python/practise/带你学DjangoRESTframework/DRF/my_drf/app05/views.py | filename: views.py | extension: py | length_bytes: 3,192
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-08-08T11:42:11.983677 | revision_date: 2023-07-29T09:19:47 | committer_date: 2023-07-29T09:19:47
github_id: 188,768,643 | star_events_count: 1,443 | fork_events_count: 617
gha_event_created_at: 2023-08-24T02:10:34 | gha_created_at: 2019-05-27T04:04:10 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from django.shortcuts import render
from rest_framework.views import APIView
from django.http import JsonResponse
from rest_framework.authentication import BaseAuthentication, BasicAuthentication, TokenAuthentication, \
SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import exceptions
from .models import User, UserToken
from .throttlings import VisitThrottling
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
# Create your views here.
import time
import hashlib
from django.http import HttpResponse
def get_md5(user):
ctime = str(time.time())
m = hashlib.md5(bytes(user, encoding='utf-8'))
m.update(bytes(ctime, encoding='utf-8'))
return m.hexdigest()
class LoginView(APIView):
def post(self, request, *args, **kwargs):
ret = {'code': 1, 'msg': None, 'data': {}}
        # user = request._request.POST.get('username')
        user = request.POST.get('username')
        pwd = request.POST.get('password')
        obj = User.objects.filter(username=user, password=pwd).first()
        if not obj:
            ret['code'] = -1
            ret['msg'] = "用户名或密码错误"  # wrong username or password
            return JsonResponse(ret)
        token = get_md5(user)
        UserToken.objects.update_or_create(user=obj, defaults={'token': token})
        ret['token'] = token
        return JsonResponse(ret)
class MyAuthentication(BaseAuthentication):
def authenticate(self, request):
token = request.META.get('HTTP_TOKEN')
obj = UserToken.objects.filter(token=token).first()
if not obj:
raise exceptions.AuthenticationFailed('验证失败')
else:
return (obj.user, obj)
class CartView(APIView):
    # All of these settings are view-local (not project-wide defaults)
    # Which authentication backends to authenticate against
    # authentication_classes = [BasicAuthentication, TokenAuthentication, SessionAuthentication]
    # Custom authentication class defined above
    # authentication_classes = [MyAuthentication]
    # JWT-based authentication
    # authentication_classes = [JSONWebTokenAuthentication]
    # Only authenticated users may access this view
    # permission_classes = [IsAuthenticated]
    # Custom permission class
    # permission_classes = [MyPermission]
    # Throttling (view-local)
throttle_classes = [VisitThrottling]
def get(self, request, *args, **kwargs):
ctx = {
"code": 1,
"msg": "ok",
"data": {
"goods": [
{
"name": "苹果",
"price": 12
},
{
"name": "苹果1",
"price": 13
},
]
}
}
return JsonResponse(ctx)
def put(self, request, *args, **kwargs):
return HttpResponse('ok')
class VersionView(APIView):
def get(self, request, *args, **kwargs):
print(request.version)
if request.version == 'v1':
ctx = {"code": 1, "msg": "ok", "data": {}}
return JsonResponse(ctx)
else:
ctx = {"code": 2, "msg": "ok", "data": {}}
return JsonResponse(ctx)
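
# A minimal sketch of how a client might exercise these views, assuming the
# custom MyAuthentication class is enabled on CartView and that the login and
# cart URLs below are wired up in urls.py (URLs and credentials are
# placeholders, not part of this file):
#
#   import requests
#   login = requests.post("http://127.0.0.1:8000/login/",
#                         data={"username": "alice", "password": "secret"})
#   token = login.json().get("token")
#   # MyAuthentication reads request.META['HTTP_TOKEN'], i.e. a "Token" header.
#   cart = requests.get("http://127.0.0.1:8000/cart/", headers={"Token": token})
#   print(cart.json())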

blob_id: bdbb0da14e7677405830ee167f30f586aee09567 | directory_id: 0ba2e5061577f6286ff9265ef1df9aca96769445 | content_id: 0a2763cb656ebad96d165f648a9c54f0e72d94df
repo_name: ZoranPandovski/al-go-rithms | branch_name: refs/heads/master | snapshot_id: 68d5d02f80a61de9baf8e50a81a52e7d0b3983a0 | revision_id: 4ae6ba54e90af14af236e03e435eb0402dcac787
path: /math/sieve of eratosthenes/python/sieveoferalothenes.py | filename: sieveoferalothenes.py | extension: py | length_bytes: 965
detected_licenses: ["CC0-1.0"] | license_type: permissive | gha_license_id: CC0-1.0
visit_date: 2023-09-04T16:04:04.321676 | revision_date: 2023-06-06T15:22:16 | committer_date: 2023-06-06T15:22:16
github_id: 93,438,176 | star_events_count: 1,421 | fork_events_count: 2,445
gha_event_created_at: 2023-06-15T14:24:28 | gha_created_at: 2017-06-05T19:20:20 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# About the algorithm: the sieve of Eratosthenes is one of the most efficient ways
# to find all primes smaller than n when n is smaller than 10 million or so.
# Time complexity: O(n log log n).
# This program checks every number up to a chosen limit and prints True if it is
# prime and False otherwise.
import math
n=int(input("Enter the number\t"))
isPrime=[]
for i in range(0,n,1):
    isPrime.append(True)  # initialise every index up to n as True (assume prime)
isPrime[0]=False
isPrime[1]=False
for i in range(2,int(math.sqrt(n))+1,1):
    for j in range(2*i,n,i):  # mark every multiple of i as composite
isPrime[j]=False
count_prime=0
for i in range(1,n,1):
if isPrime[i]==True:
count_prime+=1
print(i,":",isPrime[i])
print("The number of prime numbers upto the chosen range is",count_prime)

blob_id: 8cb5b80d06f0994cd5a54e59c691b417c3f70ee0 | directory_id: 57bc404899f914eeef7ba298bf1e99883c864a26 | content_id: dd6f901831c51805769392e63d72e23715b61aa3
repo_name: priyankchheda/algorithms | branch_name: refs/heads/master | snapshot_id: 547f19193273ac6a424fe4ba5e1375cc02ea4f60 | revision_id: 38a5de72db14ef2664489da9857b598d24c4e276
path: /linked_list/library/doubly_linked_list.py | filename: doubly_linked_list.py | extension: py | length_bytes: 5,287
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-17T17:10:10.044940 | revision_date: 2022-04-16T13:52:37 | committer_date: 2022-04-16T13:52:37
github_id: 133,684,565 | star_events_count: 195 | fork_events_count: 38
gha_event_created_at: 2023-08-16T10:26:48 | gha_created_at: 2018-05-16T15:10:56 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
""" Implementation of Doubly linked list Data Structure """
class Node:
""" Node class contains everything related to linked list node """
def __init__(self, data):
""" initializing single node with data """
self.data = data
self.next = None
self.previous = None
class DoublyLinkedList:
""" A Doubly linked list (DLL) contains an extra pointer, typically
called previous pointer, together with next pointer and data which
are there in singly linked list.
"""
def __init__(self):
""" initializing doublt linked list with zero node """
self.head = None
def is_empty(self):
""" returns True if the linked list is empty. Otherwise, return False.
"""
return self.head is None
def __len__(self):
""" Traverses the linked list and returns an integer value representing
the number of nodes in the linked list.
The time complexity is O(n) because every node in the linked list
must be visited in order to calculate the size of the linked list.
"""
if self.head is None:
return 0
count = 0
current = self.head
# While there are still Nodes left to count
while current is not None:
count += 1
current = current.next
return count
def insert_head(self, data):
""" inserts node at the start of doubly linked list """
if self.head is None:
self.head = Node(data)
return
new_node = Node(data)
current = self.head
new_node.next = current
current.previous = new_node
self.head = new_node
def insert_tail(self, data):
""" inserts node at the end of doubly linked list """
if self.head is None:
self.head = Node(data)
return
current = self.head
new_node = Node(data)
while current.next is not None:
current = current.next
current.next = new_node
new_node.previous = current
def insert_at(self, position, data):
""" inserts node at particular position in doubly linked list.
index starts from 0.
"""
dll_size = len(self)
if position < 0 or position > dll_size:
raise Exception("Invalid position")
if position == dll_size:
self.insert_tail(data)
return
if position == 0:
self.insert_head(data)
return
current = self.head
for _ in range(0, position):
current = current.next
new_node = Node(data)
previous = current.previous
previous.next = new_node
current.previous = new_node
new_node.next = current
new_node.previous = previous
def delete_head(self):
""" removes first node and returns data. Raise exception,
if doubly linked list is empty
"""
if self.head is None:
raise Exception("linked list is already empty")
if self.head.next is None:
self.head = None
return
self.head = self.head.next
self.head.previous = None
def delete_tail(self):
""" removes last node and returns data. raise exception,
if doubly linked list is empty
"""
if self.head is None:
raise Exception("linked list is already empty")
if self.head.next is None:
self.head = None
return
current = self.head
while current.next is not None:
current = current.next
previous = current.previous
current.previous = None
previous.next = None
del current
def delete_at(self, position):
""" removes specified node from doubly linked list and returns data.
raise exception, if position is invalid.
"""
dll_size = len(self)
if position < 0 or position > dll_size:
raise Exception("Invalid position")
if position == 0:
self.delete_head()
elif position == dll_size:
self.delete_tail()
else:
current = self.head
for _ in range(0, position):
current = current.next
previous_node = current.previous
next_node = current.next
previous_node.next = next_node
next_node.previous = previous_node
del current
def print(self):
""" prints entire linked list without changing underlying data """
current = self.head
while current is not None:
print(" <->", current.data, end="")
current = current.next
print()
def main():
""" operational function """
dll = DoublyLinkedList()
print("is_empty?", dll.is_empty())
dll.insert_head(3)
print("is_empty?", dll.is_empty())
dll.insert_head(2)
dll.insert_head(1)
dll.insert_tail(4)
dll.insert_tail(5)
dll.insert_tail(6)
print("size?", len(dll))
dll.print()
dll.insert_at(3, 66)
dll.print()
print("deleting head")
dll.delete_at(3)
dll.print()
if __name__ == '__main__':
main()

blob_id: 87036940e7a87cc1a213d9a70265ac47e82edb7d | directory_id: c3b69ed405d910eb4783107f65dfe5af509e4f9f | content_id: 690b89dc86d38ee485f887a19259522cd437c637
repo_name: microsoft/Simplify-Docx | branch_name: refs/heads/master | snapshot_id: 32e992a8313ca20ffe69dd0d32371436ecc4bed1 | revision_id: 7f41e9195f5eb9e40558d57dc41f325f92a1b3a7
path: /src/simplify_docx/iterators/body.py | filename: body.py | extension: py | length_bytes: 2,218
detected_licenses: ["LicenseRef-scancode-generic-cla", "MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-01T08:59:34.971626 | revision_date: 2022-06-13T17:59:06 | committer_date: 2022-06-13T17:59:06
github_id: 178,740,813 | star_events_count: 160 | fork_events_count: 37
gha_event_created_at: 2022-06-13T17:59:07 | gha_created_at: 2019-03-31T20:50:48 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""
Iterate over containers (i.e. "things that can contain EG_BlockLevelElts")
"""
from docx.oxml.ns import qn
from .generic import register_iterator
from ..elements import paragraph, table, empty, altChunk
# RANGE MARKUP
register_iterator(
"EG_RangeMarkupElements",
TAGS_TO_IGNORE=[
qn("w:bookmarkStart"),
qn("w:bookmarkEnd"),
qn("w:commentRangeStart"),
qn("w:commentRangeEnd"),
qn("w:moveToRangeStart"),
qn("w:moveToRangeEnd"),
],
TAGS_TO_WARN={
qn("w:customXmlInsRangeStart"): "Ignoring Revision Tags",
qn("w:customXmlInsRangeEnd"): "Ignoring Revision Tags",
qn("w:customXmlDelRangeStart"): "Ignoring Revision Tags",
qn("w:customXmlDelRangeEnd"): "Ignoring Revision Tags",
qn("w:customXmlMoveFromRangeStart"): "Ignoring Revision Tags",
qn("w:customXmlMoveFromRangeEnd"): "Ignoring Revision Tags",
qn("w:customXmlMoveToRangeStart"): "Ignoring Revision Tags",
qn("w:customXmlMoveToRangeEnd"): "Ignoring Revision Tags",
},
TAGS_TO_SKIP={qn("w:moveFromRangeStart"): ("id", qn("w:MoveFromRangeEnd"))},
)
# RUN LEVEL ELEMENTS
register_iterator(
"EG_RunLevelElts",
TAGS_TO_YIELD={qn("m:oMathPara"): empty, qn("m:oMath"): empty},
TAGS_TO_NEST={qn("w:ins"): "EG_RunLevelElts", qn("w:moveTo"): "EG_RunLevelElts"},
TAGS_TO_IGNORE=[
# INVISIBLE THINGS
qn("w:proofErr"),
qn("w:permStart"),
qn("w:permEnd"),
qn("w:del"),
qn("w:moveFrom"),
qn("w:commentRangeStart"),
qn("w:commentRangeEnd"),
# RANGE MARKER
qn("w:moveToRangeStart"),
qn("w:moveToRangeEnd"),
],
extends=["EG_RangeMarkupElements"],
)
# BLOCK LEVEL ELEMENTS
register_iterator(
"EG_BlockLevelElts",
TAGS_TO_YIELD={
qn("w:p"): paragraph,
qn("w:tbl"): table,
qn("w:sdt"): empty,
qn("w:altChunk"): altChunk,
},
TAGS_TO_NEST={qn("w:customXml"): "EG_BlockLevelElts"},
TAGS_TO_IGNORE=[qn("w:sectPr"), qn("w:tcPr"), qn("w:pPr")],
extends=["EG_RunLevelElts"],
)
# BODY
register_iterator(
"CT_Body", TAGS_TO_IGNORE=[qn("w:sectPr")], extends=["EG_BlockLevelElts"]
)

blob_id: 2eab43ec119b18a070c3e5d6cf97ab3f319721ab | directory_id: 0ba2e5061577f6286ff9265ef1df9aca96769445 | content_id: 2febf9863e17071441247ecfacf9d50f18fac7af
repo_name: ZoranPandovski/al-go-rithms | branch_name: refs/heads/master | snapshot_id: 68d5d02f80a61de9baf8e50a81a52e7d0b3983a0 | revision_id: 4ae6ba54e90af14af236e03e435eb0402dcac787
path: /dp/knapsack_problem/py/FractionalKnapsack.py | filename: FractionalKnapsack.py | extension: py | length_bytes: 449
detected_licenses: ["CC0-1.0"] | license_type: permissive | gha_license_id: CC0-1.0
visit_date: 2023-09-04T16:04:04.321676 | revision_date: 2023-06-06T15:22:16 | committer_date: 2023-06-06T15:22:16
github_id: 93,438,176 | star_events_count: 1,421 | fork_events_count: 2,445
gha_event_created_at: 2023-06-15T14:24:28 | gha_created_at: 2017-06-05T19:20:20 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
##### GREEDY SOLUTION #####
from itertools import accumulate
from bisect import bisect
def fracKnapsack(vl, wt, W, n):
    # sort items by value-to-weight ratio, highest first
    r = list(sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True))
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # prefix sums of the sorted weights
    acc = list(accumulate(wt))
    # k = number of items that fit into the knapsack completely
    k = bisect(acc, W)
    # take the first k items whole, plus a fraction of item k if capacity remains
    return 0 if k == 0 else sum(vl[:k]) + (W - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k])
print("%.0f"%fracKnapsack([60, 100, 120],[10, 20, 30],50,3))

blob_id: e8296cbf0f53136bedcce6ab5f44da417925ddca | directory_id: 48d1002394d233cf5932c7ef69300400af79118a | content_id: b48db9ad4a575b2140c5913ed9efe111a6d824d9
repo_name: kivy/kivy | branch_name: refs/heads/master | snapshot_id: ba2668bffe4e125fd1c5aace54f671343802850e | revision_id: ca1b918c656f23e401707388f25f4a63d9b8ae7d
path: /examples/widgets/actionbar.py | filename: actionbar.py | extension: py | length_bytes: 826
detected_licenses: ["LGPL-2.1-only", "MIT", "Apache-2.0"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-04T02:27:05.311875 | revision_date: 2023-08-26T08:00:20 | committer_date: 2023-08-26T08:00:20
github_id: 1,049,095 | star_events_count: 16,076 | fork_events_count: 4,161
gha_event_created_at: 2023-09-09T07:55:18 | gha_created_at: 2010-11-03T20:27:32 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from kivy.base import runTouchApp
from kivy.lang import Builder
runTouchApp(Builder.load_string('''
ActionBar:
pos_hint: {'top':1}
ActionView:
use_separator: True
ActionPrevious:
title: 'Action Bar'
with_previous: False
ActionOverflow:
ActionButton:
icon: 'atlas://data/images/defaulttheme/audio-volume-high'
ActionButton:
important: True
text: 'Important'
ActionButton:
text: 'Btn2'
ActionButton:
text: 'Btn3'
ActionButton:
text: 'Btn4'
ActionGroup:
text: 'Group1'
ActionButton:
text: 'Btn5'
ActionButton:
text: 'Btn6'
ActionButton:
text: 'Btn7'
'''))

blob_id: e8c1fda4f0b59d639860106fc201b8fdc01ed788 | directory_id: 12f0bd77926127cdacc2452d6f9cfed91806b2fe | content_id: b12aee115cbd8ae104744c9e98becf8c311f3568
repo_name: IDAES/idaes-pse | branch_name: refs/heads/main | snapshot_id: e03d2583ae1ba968a7099f9f439fd8c3efa12904 | revision_id: deacf4c422bc9e50cb347e11a8cbfa0195bd4274
path: /idaes/core/base/property_meta.py | filename: property_meta.py | extension: py | length_bytes: 20,284
detected_licenses: ["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-16T19:13:00.355572 | revision_date: 2023-08-04T04:19:29 | committer_date: 2023-08-04T04:19:29
github_id: 168,622,088 | star_events_count: 173 | fork_events_count: 227
gha_event_created_at: 2023-09-11T16:04:55 | gha_created_at: 2019-02-01T01:12:51 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
These classes handle the metadata aspects of classes representing
property packages.
Implementors of property packages need to do the following:
1. Create a new class that inherits from
:class:`idaes.core.property_base.PhysicalParameterBlock`, which in turn
inherits from :class:`HasPropertyClassMetadata`, in this module.
2. In that class, implement the `define_metadata()` method, inherited from
:class:`HasPropertyClassMetadata`. This method is called
automatically, once, when the `get_metadata()` method is first invoked.
An empty metadata object (an instance of :class:`PropertyClassMetadata`)
will be passed in, which the method should populate with information about
properties and default units.
Example::
from idaes.core.property_base import PhysicalParameterBlock
class MyPropParams(PhysicalParameterBlock):
@classmethod
def define_metadata(cls, meta):
meta.add_default_units({foo.U.TIME: 'fortnights',
foo.U.MASS: 'stones'})
meta.add_properties({'under_sea': {'units': 'leagues'},
'tentacle_size': {'units': 'yards'}})
meta.add_required_properties({'under_sea': 'leagues',
'tentacle_size': 'yards'})
# Also, of course, implement the non-metadata methods that
# do the work of the class.
"""
# TODO: Missing docstrings
# pylint: disable=missing-function-docstring
from pyomo.environ import units
from pyomo.core.base.units_container import _PyomoUnit, InconsistentUnitsError
from pyomo.common.deprecation import deprecation_warning
from idaes.core.util.exceptions import PropertyPackageError
from idaes.core.base.property_set import StandardPropertySet, PropertySetBase
import idaes.logger as idaeslog
__author__ = "Dan Gunter <dkgunter@lbl.gov>, Andrew Lee"
_log = idaeslog.getLogger(__name__)
class HasPropertyClassMetadata(object):
"""Interface for classes that have PropertyClassMetadata."""
_metadata = None
@classmethod
def get_metadata(cls):
"""Get property parameter metadata.
If the metadata is not defined, this will instantiate a new
metadata object and call `define_metadata()` to set it up.
If the metadata is already defined, it will be simply returned.
Returns:
PropertyClassMetadata: The metadata
"""
if cls._metadata is None:
pcm = PropertyClassMetadata()
cls.define_metadata(pcm)
cls._metadata = pcm
# Check that the metadata was actually populated
# Check requires looking at private attributes
# pylint: disable-next=protected-access
if pcm._properties is None or pcm._default_units is None:
raise PropertyPackageError(
"Property package did not populate all expected metadata."
)
return cls._metadata
@classmethod
def define_metadata(cls, pcm):
"""Set all the metadata for properties and units.
This method should be implemented by subclasses.
In the implementation, they should set information into the
object provided as an argument.
Args:
pcm (PropertyClassMetadata): Add metadata to this object.
Returns:
None
"""
raise NotImplementedError()
class UnitSet(object):
"""
Object defining the set of recognised quantities in IDAES and their base units.
Units of measurement are defined by setting units for the seven base SI quantities
(amount, current, length, luminous intensity, mass, temperature and time), from which units
for all other quantities are derived. The units of the seven base quantities must be provided
when instantiating the UnitSet, otherwise base SI units are assumed.
    Units can be accessed via either a property on the UnitSet (e.g., UnitSet.TIME) or
via an index on the UnitSet (e.g., UnitSet["time"]).
"""
_base_quantities = {
"AMOUNT": units.mol,
"CURRENT": units.ampere,
"LENGTH": units.meter,
"LUMINOUS_INTENSITY": units.candela,
"MASS": units.kilogram,
"TEMPERATURE": units.kelvin,
"TIME": units.seconds,
}
def __init__(self):
self._time = units.seconds
self._length = units.meter
self._mass = units.kilogram
self._amount = units.mole
self._temperature = units.kelvin
self._current = units.ampere
self._luminous_intensity = units.candela
def set_units(
self,
amount: _PyomoUnit = units.mol,
current: _PyomoUnit = units.ampere,
length: _PyomoUnit = units.meter,
luminous_intensity: _PyomoUnit = units.candela,
mass: _PyomoUnit = units.kilogram,
temperature: _PyomoUnit = units.kelvin,
time: _PyomoUnit = units.seconds,
):
"""
Set desired units of measurement for the seven base quantities.
Args:
amount: units for amount (default = moles)
current: units for current (default = Amperes)
length: units for length (default = meters)
luminous_intensity: units for luminous intensity (default = candela)
mass: units for mass (default = kilograms)
temperature: units for temperature (default = Kelvins)
time: units for time (default = seconds)
Returns:
None
"""
self._time = time
self._length = length
self._mass = mass
self._amount = amount
self._temperature = temperature
self._current = current
self._luminous_intensity = luminous_intensity
# Check that valid units were assigned
for q, expected_dim in self._base_quantities.items():
u = getattr(self, q)
if not isinstance(u, _PyomoUnit):
# Check for non-unit inputs from user
raise PropertyPackageError(
f"Unrecognized units of measurement for quantity {q} ({u})"
)
# Check for expected dimensionality
try:
# Try to convert user-input to SI units of expected dimensions
units.convert(u, expected_dim)
except InconsistentUnitsError:
# An error indicates a mismatch in units or the units registry
raise PropertyPackageError(
f"Invalid units of measurement for quantity {q} ({u}). "
"Please ensure units provided are valid for this quantity and "
"use the Pyomo unit registry."
)
def __getitem__(self, key: str):
try:
# Check to catch cases where luminous intensity has a space
return getattr(self, key.upper().replace(" ", "_"))
except AttributeError:
raise PropertyPackageError(
f"Unrecognised quantity {key}. Please check that this is a recognised quantity "
"defined in idaes.core.base.property_meta.UnitSet."
)
def unitset_is_consistent(self, other: "UnitSet"):
"""
Checks that defined units of measurement for base quantities are consistent with those
in other UnitSet.
Args:
other: UnitSet to check for consistency with
Returns:
Bool indicating whether units are consistent
"""
return all(getattr(self, q) is getattr(other, q) for q in self._base_quantities)
@property
def TIME(self):
return self._time
@property
def LENGTH(self):
return self._length
@property
def MASS(self):
return self._mass
@property
def AMOUNT(self):
return self._amount
@property
def TEMPERATURE(self):
return self._temperature
@property
def CURRENT(self):
return self._current
@property
def LUMINOUS_INTENSITY(self):
return self._luminous_intensity
# Length based
@property
def AREA(self):
return self._length**2
@property
def VOLUME(self):
return self._length**3
@property
def MOLAR_VOLUME(self):
return self._length**3 * self._amount**-1
# Flows
@property
def FLOW_MASS(self):
return self._mass * self._time**-1
@property
def FLOW_MOLE(self):
return self._amount * self._time**-1
@property
def FLOW_VOL(self):
return self._length**3 * self._time**-1
@property
def FLUX_MASS(self):
return self._mass * self._time**-1 * self._length**-2
@property
def FLUX_MOLE(self):
return self._amount * self._time**-1 * self._length**-2
@property
def FLUX_ENERGY(self):
return self._mass * self._time**-3
# Velocity, Acceleration and Force
@property
def VELOCITY(self):
return self._length * self._time**-1
@property
def ACCELERATION(self):
return self._length * self._time**-2
@property
def FORCE(self):
return self._length * self._mass * self._time**-2
# Pressures
@property
def PRESSURE(self):
return self._mass * self._length**-1 * self._time**-2
@property
def GAS_CONSTANT(self):
return (
self._mass
* self._length**2
* self._time**-2
* self._temperature**-1
* self._amount**-1
)
# Densities & Concentrations
@property
def DENSITY_MASS(self):
return self._mass * self._length**-3
@property
def DENSITY_MOLE(self):
return self._amount * self._length**-3
@property
def MOLALITY(self):
return self._amount * self._mass
@property
def MOLECULAR_WEIGHT(self):
return self._mass / self._amount
# Energy
@property
def ENERGY(self):
return self._mass * self._length**2 * self._time**-2
@property
def ENERGY_MASS(self):
return self._length**2 * self._time**-2
@property
def ENERGY_MOLE(self):
return self._mass * self._length**2 * self._time**-2 * self._amount**-1
@property
def POWER(self):
return self._mass * self._length**2 * self._time**-3
@property
def VOLTAGE(self):
return self._mass * self._length**2 * self._time**-3 * self._current**-1
# Heat Related
@property
def HEAT_CAPACITY_MASS(self):
return self._length**2 * self._time**-2 * self._temperature**-1
@property
def HEAT_CAPACITY_MOLE(self):
return (
self._mass
* self._length**2
* self._time**-2
* self._temperature**-1
* self._amount**-1
)
@property
def HEAT_TRANSFER_COEFFICIENT(self):
return self._mass * self._time**-3 * self._temperature**-1
# Entropy
@property
def ENTROPY(self):
return (
self._mass * self._length**2 * self._time**-2 * self._temperature**-1
)
@property
def ENTROPY_MASS(self):
return self._length**2 * self._time**-2 * self._temperature**-1
@property
def ENTROPY_MOLE(self):
return (
self._mass
* self._length**2
* self._time**-2
* self._temperature**-1
* self._amount**-1
)
# Transport Properties
@property
def DIFFUSIVITY(self):
return self._length**2 * self._time**-1
@property
def DYNAMIC_VISCOSITY(self):
return self._mass * self._length**-1 * self._time**-1
@property
def KINEMATIC_VISCOSITY(self):
return self._length**2 * self._time**-1
@property
def SURFACE_TENSION(self):
return self._mass * self._time**-2
@property
def THERMAL_CONDUCTIVITY(self):
return self._mass * self._length * self._time**-3 * self._temperature**-1
class PropertyClassMetadata(object):
"""
Container for metadata about the property class, which includes
default units and properties.
Example usage::
foo = PropertyClassMetadata()
foo.add_default_units(time = pyo.units.fortnights,
mass = pyo.units.stones)
foo.add_properties({'under_sea': {'method': 'submarine', 'units': 'leagues', 'required': False, 'supported': True},
'tentacle_size': {'method': 'kraken', 'units': 'yards', 'required': True, 'supported': True}})
"""
def __init__(self):
# TODO: Deprecate in favour of common units property
self._default_units = UnitSet()
# Assume a default PropertySet to begin with. Property packages can replace this
# with more specialized forms if required
self._properties = StandardPropertySet(parent=self)
def define_property_set(self, propset: PropertySetBase):
"""
Define the type of property set to use for this package.
Args:
propset: PropertySet class (must derive from PropertySetBase)
Returns:
None
"""
if not issubclass(propset, PropertySetBase):
raise PropertyPackageError(
f"{propset} does not derive from IDAES PropertySetBase class."
)
self._properties = propset(parent=self)
@property
def default_units(self):
# TODO: Deprecate in favour of common units property
return self._default_units
@property
def derived_units(self):
# TODO: Deprecate in favour of common units property
return self._default_units
@property
def properties(self):
return self._properties
def add_default_units(self, u: dict):
"""
Set units of measurement for base quantities used in this property package. Units
should be provided as a dict with keys being the seven base quantities and values
being Pyomo unit expressions. These will be used to update the UnitSet associated
with this property package.
Args:
u (dict): Key=property, Value=units
Returns:
None
Raises:
TypeError if definitions for unexpected quantities are found
"""
# TODO: Could look at replacing dict with defined arguments
# This would be a big API change
try:
self._default_units.set_units(**u)
except TypeError:
raise TypeError(
"Unexpected argument for base quantities found when creating UnitSet. "
"Please ensure that units are only defined for the seven base quantities."
)
def add_properties(self, p: dict):
"""Add properties to the metadata.
For each property, the value should be another dict which may contain
the following keys:
- 'units': (optional) units of measurement for the property.
- 'indices': (optional) list of sub-property indices for this property. If None, use default set, if False unindexed.
- 'method': (optional, only if 'indices' is None or False) the name of a method to construct the
property as a str, or None if the property will be
constructed by default.
- 'supported': (optional, only if 'indices' is None or False) bool indicating if this property is
supported by this package.
- 'required': (optional, only if 'indices' is None or False) bool indicating if this property is
required by this package.
- 'valid_range': (optional, only if 'indices' is None or False) 2-tuple containing range of validity for
property values (lower, upper).
- 'initialize': (optional) dict indicating 'method', 'required', 'supported' and 'valid_range' values for sub-properties by index.
Args:
p (dict): Key=property, Value=dict
Returns:
None
"""
# TODO: Deprecate in favour of directly updating or adding metadata
for k, v in p.items():
units = v.pop("units", None)
try:
try:
n, i = self._properties.get_name_and_index(k)
except ValueError:
msg = (
f"The property name {k} in property metadata is not a recognized "
"standard property name defined in this PropertySet. Please refer "
"to IDAES standard names in the IDAES documentation. You can use "
"the define_custom_properties() rather than the add_properties() "
"method to define metadata for this property. You can also use a "
"different property set by calling the define_property_set() method."
)
deprecation_warning(
msg=msg, logger=_log, version="2.0.0", remove_in="3.0.0"
)
n = k
i = None
getattr(self._properties, n)[i].update_property(**v)
except AttributeError:
# TODO: Deprecate this and make it raise an exception if an unknown property is encountered
# Force users to explicitly declare new/custom properties
self._properties.define_property(name=k, **v, units=units)
def define_custom_properties(self, p: dict):
"""Add custom properties to the metadata.
For each property, the value should be another dict which may contain
the following keys:
- 'units': (optional) units of measurement for the property.
- 'indices': (optional) list of sub-property indices for this property. If None, use default set, if False unindexed.
- 'method': (optional, only if 'indices' is None or False) the name of a method to construct the
property as a str, or None if the property will be
constructed by default.
- 'supported': (optional, only if 'indices' is None or False) bool indicating if this property is
supported by this package.
        - 'required': (optional, only if 'indices' is None or False) bool indicating if this property is
required by this package.
- 'initialize': (optional) dict indicating 'method', 'required' and 'supported' values for sub-properties by index.
Args:
p (dict): Key=property, Value=dict
Returns:
None
"""
for k, v in p.items():
self._properties.define_property(name=k, **v)
def add_required_properties(self, p: str):
# TODO: Deprecate
"""Add required properties to the metadata.
Update 'required' attribute of specified properties.
Note that argument must be a dict for backwards compatibility.
Args:
p (dict): Key=property, Value=(ignored)
Returns:
None
"""
for k in p.keys():
try:
self._properties[k].set_required(True)
except KeyError:
self._properties.define_property(name=k, supported=False, required=True)
def get_derived_units(self, units: str):
# TODO: Deprecate in favour of common units property
return self.derived_units[units]

blob_id: df1b2f302d3688282a83ee7b9aee66d6f17ecddc | directory_id: 46a79c07c6ff972c8b1287bae29d2b3557cb0525 | content_id: d69404729542691dcb362954f0bc216d834ab397
repo_name: nordic-institute/X-Road | branch_name: refs/heads/develop | snapshot_id: dee64a6fae2caf8e02f83a037368df4ed21399ff | revision_id: 25bdbff39e614c7258d6c202584604d3505e60e0
path: /src/systemtest/op-monitoring/xrd-opmon-tests/testcases/test_metaservices.py | filename: test_metaservices.py | extension: py | length_bytes: 13,994
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-09-01T09:19:05.646215 | revision_date: 2023-08-31T12:11:04 | committer_date: 2023-08-31T12:11:04
github_id: 134,143,184 | star_events_count: 559 | fork_events_count: 190
gha_event_created_at: 2023-09-14T09:52:29 | gha_created_at: 2018-05-20T11:06:37 | gha_language: Java
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
# The MIT License
# Copyright (c) 2016 Estonian Information System Authority (RIA), Population Register Centre (VRK)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Test case for verifying that the operational monitoring related data
# of metaservice requests are stored by the operational monitoring
# daemon.
# It is also verified that central monitoring client has full access to
# operational monitoring data.
import os
import common
# Base sizes of request and responses.
# Parameters sizes must be added to these values.
LISTMETHODS_QUERY_REQUEST_SOAP_BASE_SIZE = 1062
# Disabling responseSoapSize check, because tested subsystems may have
# additional services
# LISTMETHODS_QUERY_RESPONSE_SOAP_BASE_SIZE = 2735
GET_SS_METRICS_QUERY_REQUEST_SOAP_BASE_SIZE = 1308
def _listmethods_query_request_parameters_size(query_parameters):
# Request template: listmethods_producer_query_template.xml
return (
len(query_parameters["producer_instance"])
+ len(query_parameters["producer_class"])
+ len(query_parameters["producer_code"])
+ len(query_parameters["producer_system"])
+ len(query_parameters["client_instance"])
+ len(query_parameters["client_class"])
+ len(query_parameters["client_code"])
+ len(query_parameters["client_system"])
)
def _expected_keys_and_values_of_listmethods_query_rec(
xroad_message_id, security_server_address, security_server_type, query_parameters):
request_parameters_size = _listmethods_query_request_parameters_size(query_parameters)
print("Size of listmethods query request parameters: {}".format(request_parameters_size))
return [
("clientMemberClass", query_parameters["client_class"]),
("clientMemberCode", query_parameters["client_code"]),
("clientSecurityServerAddress", query_parameters["client_server_address"]),
("clientSubsystemCode", query_parameters["client_system"]),
("clientXRoadInstance", query_parameters["client_instance"]),
("messageId", xroad_message_id),
("messageProtocolVersion", "4.0"),
("requestAttachmentCount", 0),
("requestSize", LISTMETHODS_QUERY_REQUEST_SOAP_BASE_SIZE + request_parameters_size),
("responseAttachmentCount", 0),
("securityServerInternalIp", security_server_address),
("securityServerType", security_server_type),
("serviceCode", "listMethods"),
("serviceMemberClass", query_parameters["producer_class"]),
("serviceMemberCode", query_parameters["producer_code"]),
("serviceSecurityServerAddress", query_parameters["producer_server_address"]),
("serviceSubsystemCode", query_parameters["producer_system"]),
("serviceVersion", "v1"),
("serviceXRoadInstance", query_parameters["producer_instance"]),
("succeeded", True),
]
def _get_ss_metrics_query_request_parameters_size(query_parameters):
# Request template: get_ss_metrics_query_template.xml
return (
2 * len(query_parameters["producer_instance"])
+ 2 * len(query_parameters["producer_class"])
+ 2 * len(query_parameters["producer_code"])
+ len(query_parameters["producer_server_code"])
+ len(query_parameters["client_instance"])
+ len(query_parameters["client_class"])
+ len(query_parameters["client_code"])
+ len(query_parameters["client_monitor_system"])
)
def _expected_keys_and_values_of_get_ss_metrics_query_rec(
xroad_message_id, security_server_address, security_server_type, query_parameters):
request_parameters_size = _get_ss_metrics_query_request_parameters_size(query_parameters)
print("Size of get ss metrics query request parameters: {}".format(request_parameters_size))
return [
("clientMemberClass", query_parameters["client_class"]),
("clientMemberCode", query_parameters["client_code"]),
("clientSecurityServerAddress", query_parameters["client_server_address"]),
("clientSubsystemCode", query_parameters["client_monitor_system"]),
("clientXRoadInstance", query_parameters["client_instance"]),
("messageId", xroad_message_id),
("messageProtocolVersion", "4.0"),
("requestAttachmentCount", 0),
("requestSize", GET_SS_METRICS_QUERY_REQUEST_SOAP_BASE_SIZE + request_parameters_size),
("responseAttachmentCount", 0),
("securityServerInternalIp", security_server_address),
("securityServerType", security_server_type),
("serviceCode", "getSecurityServerMetrics"),
("serviceMemberClass", query_parameters["producer_class"]),
("serviceMemberCode", query_parameters["producer_code"]),
("serviceSecurityServerAddress", query_parameters["producer_server_address"]),
("serviceXRoadInstance", query_parameters["producer_instance"]),
("succeeded", True),
]
def run(request_template_dir, query_parameters):
client_security_server_address = query_parameters["client_server_ip"]
producer_security_server_address = query_parameters["producer_server_ip"]
ssh_user = query_parameters["ssh_user"]
listmethods_query_template_filename = os.path.join(
request_template_dir, "listmethods_producer_query_template.xml")
get_ss_metrics_query_template_filename = os.path.join(
request_template_dir, "get_ss_metrics_query_template.xml")
query_data_client_template_filename = os.path.join(
request_template_dir,
"query_operational_data_client_central_monitoring_template.xml")
query_data_producer_template_filename = os.path.join(
request_template_dir,
"query_operational_data_producer_central_monitoring_template.xml")
client_timestamp_before_requests = common.get_remote_timestamp(
client_security_server_address, ssh_user)
producer_timestamp_before_requests = common.get_remote_timestamp(
producer_security_server_address, ssh_user)
message_id_listmethods = common.generate_message_id()
print("\nGenerated message ID {} for listMethods request".format(message_id_listmethods))
# Regular and operational data requests and the relevant checks
print("\n---- Sending a listMethods request to the client's security server ----\n")
request_contents = common.format_xroad_request_template(
listmethods_query_template_filename, message_id_listmethods, query_parameters)
print("Generated the following listMethods request: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part = common.get_multipart_soap(mime_parts[0])
common.print_multipart_soap(soap_part)
else:
common.parse_and_check_soap_response(raw_response)
message_id_get_ss_metrics = common.generate_message_id()
print("\nGenerated message ID {} for getSecurityServerMetrics request".format(
message_id_get_ss_metrics))
print("\n---- Sending a getSecurityServerMetrics request to "
"the client's security server ----\n")
request_contents = common.format_xroad_request_template(
get_ss_metrics_query_template_filename, message_id_get_ss_metrics, query_parameters)
print("Generated the following getSecurityServerMetrics request: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part = common.get_multipart_soap(mime_parts[0])
# getSecurityServerMetrics response is large, print only headers
common.print_multipart_soap_headers(soap_part)
# Program should never get here unless getSecurityServerMetrics
# will be changed to return data in attachments instead of
# SOAP Body
raise Exception("\nWARNING!!! getSecurityServerMetrics returned attachments\n")
else:
common.parse_and_check_soap_response(raw_response)
common.wait_for_operational_data()
client_timestamp_after_requests = common.get_remote_timestamp(
client_security_server_address, ssh_user)
producer_timestamp_after_requests = common.get_remote_timestamp(
producer_security_server_address, ssh_user)
# Now make operational data requests to both security servers and
# check the response payloads.
print("\n---- Sending an operational data request to the client's security server ----\n")
message_id = common.generate_message_id()
print("Generated message ID {} for query data request".format(message_id))
request_contents = common.format_query_operational_data_request_template(
query_data_client_template_filename, message_id,
client_timestamp_before_requests, client_timestamp_after_requests, query_parameters)
print("Generated the following query data request for the client's security server: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
common.print_multipart_soap_and_record_count(soap_part, record_count)
json_payload = common.get_multipart_json_payload(mime_parts[1])
# Check the presence of all the required fields in at least
# one JSON structure.
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_listmethods_query_rec(
message_id_listmethods, client_security_server_address, "Client",
query_parameters))
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_get_ss_metrics_query_rec(
message_id_get_ss_metrics, client_security_server_address, "Client",
query_parameters))
# Check if the timestamps in the response are in the expected
# range.
common.assert_expected_timestamp_values(
json_payload,
client_timestamp_before_requests, client_timestamp_after_requests)
common.print_multipart_query_data_response(json_payload)
else:
common.parse_and_check_soap_response(raw_response)
# Central monitoring client is used as a service client in
# operational data request. As central monitoring client is
# registered in client's security server, let's send the
# operational data request to producer's security server via
# client's security server.
print("\n---- Sending an operational data request from central monitoring client "
"to the producer's security server ----\n")
message_id = common.generate_message_id()
print("\nGenerated message ID {} for query data request".format(message_id))
request_contents = common.format_query_operational_data_request_template(
query_data_producer_template_filename, message_id,
producer_timestamp_before_requests, producer_timestamp_after_requests,
query_parameters)
print("Generated the following query data request for the producer's "
"security server: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
common.print_multipart_soap_and_record_count(soap_part, record_count, is_client=False)
json_payload = common.get_multipart_json_payload(mime_parts[1])
# Check the presence of all the required fields in at least
# one JSON structure.
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_listmethods_query_rec(
message_id_listmethods, producer_security_server_address, "Producer",
query_parameters))
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_get_ss_metrics_query_rec(
message_id_get_ss_metrics, producer_security_server_address, "Producer",
query_parameters))
# Check timestamp values
common.assert_expected_timestamp_values(
json_payload,
producer_timestamp_before_requests, producer_timestamp_after_requests)
common.assert_equal_timestamp_values(json_payload)
common.print_multipart_query_data_response(json_payload)
else:
common.parse_and_check_soap_response(raw_response)

blob_id: 5935aab0927558d1b8b76145c20add2a9f9edc65 | directory_id: 7491ceb405287660538e876317d3f69328757651 | content_id: ee0961e08ce16fe35d50bcb3f078cb73fef34d72
repo_name: royerlab/aydin | branch_name: refs/heads/master | snapshot_id: 4d0bd5cb1a3786cf32f1d8661d3a3aa13ec7cab1 | revision_id: 9312f227605be26fce960373c1f29a71323da914
path: /aydin/it/classic_denoisers/pca.py | filename: pca.py | extension: py | length_bytes: 7,331
detected_licenses: ["BSD-3-Clause", "LicenseRef-scancode-proprietary-license"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-04-29T20:45:42.515226 | revision_date: 2023-02-16T22:21:07 | committer_date: 2023-02-16T22:21:07
github_id: 188,953,977 | star_events_count: 125 | fork_events_count: 14
gha_event_created_at: 2023-03-15T01:04:16 | gha_created_at: 2019-05-28T04:30:19 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import math
from functools import partial
from math import prod
from typing import Optional, Union, Tuple, List
import numpy
from numpy.typing import ArrayLike
from sklearn.decomposition import PCA
from aydin.it.classic_denoisers import _defaults
from aydin.util.crop.rep_crop import representative_crop
from aydin.util.j_invariance.j_invariance import calibrate_denoiser
from aydin.util.patch_size.patch_size import default_patch_size
from aydin.util.patch_transform.patch_transform import (
extract_patches_nd,
reconstruct_from_nd_patches,
)
def calibrate_denoise_pca(
image: ArrayLike,
patch_size: Optional[Union[int, Tuple[int], str]] = None,
crop_size_in_voxels: Optional[int] = _defaults.default_crop_size_normal.value,
optimiser: str = _defaults.default_optimiser.value,
max_num_evaluations: int = _defaults.default_max_evals_hyperlow.value,
blind_spots: Optional[List[Tuple[int]]] = _defaults.default_blind_spots.value,
jinv_interpolation_mode: str = _defaults.default_jinv_interpolation_mode.value,
multi_core: bool = True,
display_images: bool = False,
display_crop: bool = False,
**other_fixed_parameters,
):
"""
Calibrates the Principal Component Analysis (PCA) denoiser for the given
image and returns the optimal parameters obtained using the N2S loss.
Parameters
----------
image: ArrayLike
Image to calibrate spectral denoiser for.
patch_size: int
Patch size for the 'image-to-patch' transform.
Can be an int s that corresponds to isotropic patches of shape: (s,)*image.ndim,
or a tuple of ints. By default (None) the patch size is chosen automatically
to give the best results.
(advanced)
crop_size_in_voxels: int or None for default
Number of voxels for crop used to calibrate denoiser.
Increase this number by factors of two if denoising quality is
unsatisfactory -- this can be important for very noisy images.
Values to try are: 65000, 128000, 256000, 320000.
We do not recommend values higher than 512000.
optimiser: str
Optimiser to use for finding the best denoising
parameters. Can be: 'smart' (default), or 'fast' for a mix of SHGO
followed by L-BFGS-B.
(advanced)
max_num_evaluations: int
Maximum number of evaluations for finding the optimal parameters.
Increase this number by factors of two if denoising quality is
unsatisfactory.
    blind_spots: list of tuples, optional
List of voxel coordinates (relative to receptive field center) to
be included in the blind-spot. For example, you can give a list of
3 tuples: [(0,0,0), (0,1,0), (0,-1,0)] to extend the blind spot
to cover voxels of relative coordinates: (0,0,0),(0,1,0), and (0,-1,0)
(advanced) (hidden)
jinv_interpolation_mode: str
J-invariance interpolation mode for masking. Can be: 'median' or
'gaussian'.
(advanced)
multi_core: bool
Use all CPU cores during calibration.
(advanced)
display_images: bool
When True the denoised images encountered during optimisation are shown.
(advanced) (hidden)
display_crop: bool
Displays crop, for debugging purposes...
(advanced) (hidden)
other_fixed_parameters: dict
Any other fixed parameters
Returns
-------
Denoising function, dictionary containing optimal parameters,
and free memory needed in bytes for computation.
"""
# Convert image to float if needed:
image = image.astype(dtype=numpy.float32, copy=False)
# obtain representative crop, to speed things up...
crop = representative_crop(
image, crop_size=crop_size_in_voxels, display_crop=display_crop
)
# Normalise patch size:
patch_size = default_patch_size(image, patch_size, odd=True)
# Ranges:
threshold_range = list(numpy.linspace(0, 1, max_num_evaluations).tolist())
# Parameters to test when calibrating the denoising algorithm
parameter_ranges = {'threshold': threshold_range}
# Combine fixed parameters:
other_fixed_parameters = other_fixed_parameters | {'patch_size': patch_size}
# Partial function:
_denoise_pca = partial(
denoise_pca, **(other_fixed_parameters | {'multi_core': multi_core})
)
# Calibrate denoiser
best_parameters = (
calibrate_denoiser(
crop,
_denoise_pca,
mode=optimiser,
denoise_parameters=parameter_ranges,
interpolation_mode=jinv_interpolation_mode,
max_num_evaluations=max_num_evaluations,
blind_spots=blind_spots,
display_images=display_images,
)
| other_fixed_parameters
)
# Memory needed:
memory_needed = 2 * image.nbytes + 6 * image.nbytes * math.prod(patch_size)
return denoise_pca, best_parameters, memory_needed
def denoise_pca(
image: ArrayLike,
patch_size: Optional[Union[int, Tuple[int]]] = None,
threshold: float = 0.1,
reconstruction_gamma: float = 0,
multi_core: bool = True,
):
"""
Denoises the given image by first applying the patch
transform, and then doing a PCA projection of these patches
along the first components. The cut-off is set by a
threshold parameter.
Parameters
----------
image: ArrayLike
Image to denoise
patch_size: int
Patch size for the 'image-to-patch' transform.
Can be: 'full' for a single patch covering the whole image, 'half', 'quarter',
or an int s that corresponds to isotropic patches of shape: (s,)*image.ndim,
or a tuple of ints. By default (None) the patch size is chosen automatically
to give the best results.
threshold: float
Threshold for proportion of components to retain. Between 0 and 1
reconstruction_gamma: float
Patch reconstruction parameter
multi_core: bool
By default we use as many cores as possible, in some cases, for small
(test) images, it might be faster to run on a single core instead of
starting the whole parallelization machinery.
Returns
-------
Denoised image
"""
# Convert image to float if needed:
image = image.astype(dtype=numpy.float32, copy=False)
# Normalise patch size:
patch_size = default_patch_size(image, patch_size, odd=True)
# First we apply the patch transform:
patches = extract_patches_nd(image, patch_size=patch_size)
# reshape patches as vectors:
original_patches_shape = patches.shape
patches = patches.reshape(patches.shape[0], -1)
# PCA dim reduction setup:
n_components = 1 + max(0, int(threshold * (prod(patch_size) - 1)))
pca = PCA(n_components=n_components)
# Project patches:
patches = pca.inverse_transform(pca.fit_transform(patches))
# reshape patches back to their original shape:
patches = patches.reshape(original_patches_shape)
# Transform back from patches to image:
denoised_image = reconstruct_from_nd_patches(
patches, image.shape, gamma=reconstruction_gamma
)
# Cast back to float32 if needed:
denoised_image = denoised_image.astype(numpy.float32, copy=False)
return denoised_image
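
# Sketch of the intended calibrate-then-denoise flow (the noisy_image variable
# is a placeholder for any float32-convertible numpy array):
#
#   denoise_function, best_parameters, memory_needed = calibrate_denoise_pca(noisy_image)
#   denoised = denoise_function(noisy_image, **best_parameters)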

blob_id: f6996259a7ee172cad25b11bdbf19c6d5dac26ca | directory_id: 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | content_id: 0ebe085070a47cb6a78726df66da8c37ef9218f4
repo_name: bridgecrewio/checkov | branch_name: refs/heads/main | snapshot_id: aeb8febed2ed90e61d5755f8f9d80b125362644d | revision_id: e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
path: /tests/terraform/parser/test_parser_internals.py | filename: test_parser_internals.py | extension: py | length_bytes: 1,381
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-31T06:57:21.990147 | revision_date: 2023-08-30T23:01:47 | committer_date: 2023-08-30T23:01:47
github_id: 224,386,599 | star_events_count: 5,929 | fork_events_count: 1,056
gha_event_created_at: 2023-09-14T20:10:23 | gha_created_at: 2019-11-27T08:55:14 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from pathlib import Path
from checkov.common.util.parser_utils import eval_string
from checkov.terraform.tf_parser import load_or_die_quietly
def test_eval_string_to_list():
# given
expected = ["a", "b", "c"]
# when
actual = eval_string('["a", "b", "c"]')
assert actual == expected
def test__load_or_die_quietly_with_bom():
# given
test_file = Path(__file__).parent / "resources/file_bom/with_bom.tf"
parsing_errors = {}
# when
definition = load_or_die_quietly(file=test_file, parsing_errors=parsing_errors)
# then
assert not parsing_errors
assert definition == {
"resource": [
{
"aws_s3_bucket": {
"example": {"bucket": ["example"], "__start_line__": 1, "__end_line__": 3},
},
}
]
}
def test__load_or_die_quietly_without_bom():
# given
test_file = Path(__file__).parent / "resources/file_bom/without_bom.tf"
parsing_errors = {}
# when
definition = load_or_die_quietly(file=test_file, parsing_errors=parsing_errors)
# then
assert not parsing_errors
assert definition == {
"resource": [
{
"aws_s3_bucket": {
"example": {"bucket": ["example"], "__start_line__": 1, "__end_line__": 3},
},
}
]
}

blob_id: 596dd9882c6fba31334f82b57db6c98003d1504d | directory_id: 23dcd65d2b4664cfdca5819b9ca56c1419d3adaa | content_id: b75b4e8aceedbbe65c6120a5f720063c906bbec3
repo_name: MegEngine/Models | branch_name: refs/heads/master | snapshot_id: 8456439ab0a9d7f548c418f25947f42ead3f1bce | revision_id: 78882f9cbaa037ad701f47d47bb80b66ad95ce87
path: /official/vision/gan/megengine_mimicry/nets/wgan/wgan_cifar.py | filename: wgan_cifar.py | extension: py | length_bytes: 4,270
detected_licenses: ["LicenseRef-scancode-generic-cla", "Apache-2.0"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2022-12-09T04:43:49.434068 | revision_date: 2022-10-31T12:09:38 | committer_date: 2022-10-31T12:09:38
github_id: 248,175,266 | star_events_count: 319 | fork_events_count: 116
gha_event_created_at: 2023-01-29T08:15:05 | gha_created_at: 2020-03-18T08:22:46 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) 2020 Kwot Sin Lee
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
from ..blocks import GBlock
from . import wgan_base
from .wgan_base import WGANDBlockOptimized as DBlockOptimized
from .wgan_base import WGANDBlockWithLayerNorm as DBlock
class WGANGeneratorCIFAR(wgan_base.WGANBaseGenerator):
r"""
ResNet backbone generator for ResNet WGAN.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz=128, ngf=256, bottom_width=4, **kwargs):
super().__init__(nz=nz, ngf=ngf, bottom_width=bottom_width, **kwargs)
# Build the layers
self.l1 = M.Linear(self.nz, (self.bottom_width**2) * self.ngf)
self.block2 = GBlock(self.ngf, self.ngf, upsample=True)
self.block3 = GBlock(self.ngf, self.ngf, upsample=True)
self.block4 = GBlock(self.ngf, self.ngf, upsample=True)
self.b5 = M.BatchNorm2d(self.ngf)
self.c5 = M.Conv2d(self.ngf, 3, 3, 1, padding=1)
self.activation = M.ReLU()
# Initialise the weights
M.init.xavier_uniform_(self.l1.weight, 1.0)
M.init.xavier_uniform_(self.c5.weight, 1.0)
def forward(self, x):
r"""
Feedforwards a batch of noise vectors into a batch of fake images.
Args:
x (Tensor): A batch of noise vectors of shape (N, nz).
Returns:
Tensor: A batch of fake images of shape (N, C, H, W).
"""
h = self.l1(x)
h = h.reshape(x.shape[0], -1, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = F.tanh(self.c5(h))
return h
class WGANDiscriminatorCIFAR(wgan_base.WGANBaseDiscriminator):
r"""
ResNet backbone discriminator for ResNet WGAN.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf=128, **kwargs):
super().__init__(ndf=ndf, **kwargs)
# Build layers
self.block1 = DBlockOptimized(3, self.ndf)
self.block2 = DBlock(self.ndf,
self.ndf,
downsample=True)
self.block3 = DBlock(self.ndf,
self.ndf,
downsample=False)
self.block4 = DBlock(self.ndf,
self.ndf,
downsample=False)
self.l5 = M.Linear(self.ndf, 1)
self.activation = M.ReLU()
# Initialise the weights
M.init.xavier_uniform_(self.l5.weight, 1.0)
def forward(self, x):
r"""
Feedforwards a batch of real/fake images and produces a batch of GAN logits.
Args:
x (Tensor): A batch of images of shape (N, C, H, W).
Returns:
Tensor: A batch of GAN logits of shape (N, 1).
"""
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.activation(h)
# Global average pooling
h = h.mean(3).mean(2)
output = self.l5(h)
return output
|
d57febb60a3800c6fb6beb7c5507045ba4792e26
|
3daa53a2190f365ee2e2acae39ca4e84919f2f50
|
/test/unit/common/middleware/test_subrequest_logging.py
|
20b71dff6763ce807e2e120255b8c3ee166d79ec
|
[
"Apache-2.0"
] |
permissive
|
openstack/swift
|
4c8e4a14c1c6f7efb049f983ede28e89bd2e9140
|
f06e5369579599648cc78e4b556887bc6d978c2b
|
refs/heads/master
| 2023-08-28T15:04:33.200849
| 2023-08-24T20:35:07
| 2023-08-24T21:05:48
| 790,019
| 2,370
| 957
|
Apache-2.0
| 2023-06-22T02:45:53
| 2010-07-22T01:50:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,644
|
py
|
test_subrequest_logging.py
|
# Copyright (c) 2016-2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.middleware import copy, proxy_logging
from swift.common.swob import Request, HTTPOk
from swift.common.utils import close_if_possible
from swift.common.wsgi import make_subrequest
from test.debug_logger import debug_logger
from test.unit.common.middleware.helpers import FakeSwift
SUB_GET_PATH = '/v1/a/c/sub_get'
SUB_PUT_POST_PATH = '/v1/a/c/sub_put'
class FakeFilter(object):
def __init__(self, app, conf, register):
self.body = ['FAKE MIDDLEWARE']
self.conf = conf
self.app = app
self.register = register
self.logger = None
def __call__(self, env, start_response):
path = SUB_PUT_POST_PATH
if env['REQUEST_METHOD'] == 'GET':
path = SUB_GET_PATH
# Make a subrequest that will be logged
hdrs = {'content-type': 'text/plain'}
sub_req = make_subrequest(env, path=path,
method=self.conf['subrequest_type'],
headers=hdrs,
agent='FakeApp',
swift_source='FA')
self.register(self.conf['subrequest_type'],
path, HTTPOk, headers=hdrs)
resp = sub_req.get_response(self.app)
close_if_possible(resp.app_iter)
return self.app(env, start_response)
class FakeApp(object):
def __init__(self, conf):
self.fake_logger = debug_logger()
self.fake_swift = self.app = FakeSwift()
self.register = self.fake_swift.register
for filter in reversed([
proxy_logging.filter_factory,
copy.filter_factory,
lambda conf: lambda app: FakeFilter(app, conf, self.register),
proxy_logging.filter_factory]):
self.app = filter(conf)(self.app)
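        # After this loop the WSGI pipeline, seen from the client inwards, is:
        #   proxy_logging -> copy -> FakeFilter -> proxy_logging -> FakeSwift
        # (wrapping in reversed order builds the outermost middleware last).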
self.app.logger = self.fake_logger
if hasattr(self.app, 'access_logger'):
self.app.access_logger = self.fake_logger
if conf['subrequest_type'] == 'GET':
self.register(conf['subrequest_type'], SUB_GET_PATH, HTTPOk, {})
else:
self.register(conf['subrequest_type'],
SUB_PUT_POST_PATH, HTTPOk, {})
@property
def __call__(self):
return self.app.__call__
class TestSubRequestLogging(unittest.TestCase):
path = '/v1/a/c/o'
def _test_subrequest_logged(self, subrequest_type):
# Test that subrequests made downstream from Copy PUT will be logged
# with the request type of the subrequest as opposed to the GET/PUT.
app = FakeApp({'subrequest_type': subrequest_type})
hdrs = {'content-type': 'text/plain', 'X-Copy-From': 'test/obj'}
req = Request.blank(self.path, method='PUT', headers=hdrs)
app.register('PUT', self.path, HTTPOk, headers=hdrs)
app.register('GET', '/v1/a/test/obj', HTTPOk, headers=hdrs)
req.get_response(app)
info_log_lines = app.fake_logger.get_lines_for_level('info')
self.assertEqual(len(info_log_lines), 4)
subreq_get = '%s %s' % (subrequest_type, SUB_GET_PATH)
subreq_put = '%s %s' % (subrequest_type, SUB_PUT_POST_PATH)
origput = 'PUT %s' % self.path
copyget = 'GET %s' % '/v1/a/test/obj'
# expect GET subreq, copy GET, PUT subreq, orig PUT
self.assertTrue(subreq_get in info_log_lines[0])
self.assertTrue(copyget in info_log_lines[1])
self.assertTrue(subreq_put in info_log_lines[2])
self.assertTrue(origput in info_log_lines[3])
def test_subrequest_logged_x_copy_from(self):
self._test_subrequest_logged('HEAD')
self._test_subrequest_logged('GET')
self._test_subrequest_logged('POST')
self._test_subrequest_logged('PUT')
self._test_subrequest_logged('DELETE')
def _test_subrequest_logged_POST(self, subrequest_type):
app = FakeApp({'subrequest_type': subrequest_type})
hdrs = {'content-type': 'text/plain'}
req = Request.blank(self.path, method='POST', headers=hdrs)
app.register('POST', self.path, HTTPOk, headers=hdrs)
expect_lines = 2
req.get_response(app)
info_log_lines = app.fake_logger.get_lines_for_level('info')
self.assertEqual(len(info_log_lines), expect_lines)
self.assertTrue('Copying object' not in info_log_lines[0])
subreq_put_post = '%s %s' % (subrequest_type, SUB_PUT_POST_PATH)
origpost = 'POST %s' % self.path
# fast post expect POST subreq, original POST
self.assertTrue(subreq_put_post in info_log_lines[0])
self.assertTrue(origpost in info_log_lines[1])
def test_subrequest_logged_with_POST(self):
self._test_subrequest_logged_POST('HEAD')
self._test_subrequest_logged_POST('GET')
self._test_subrequest_logged_POST('POST')
self._test_subrequest_logged_POST('PUT')
self._test_subrequest_logged_POST('DELETE')
if __name__ == '__main__':
unittest.main()
|
66b1b82b18f3e55305d395f3fd0ed46b1c82d869
|
f40bd24edfe91765a9ca7e4b1561ad487068beb0
|
/python/brainfuck-simple.py
|
e2ca37d60aca179ee79894b4b3ed04562b69cc02
|
[
"MIT"
] |
permissive
|
pablojorge/brainfuck
|
4a487a140a1b7bd49ec73322f833249488252e5b
|
153924714ae5e569ec39dcf0c0a5b5ae33600cc6
|
refs/heads/master
| 2022-03-11T18:50:10.588762
| 2021-02-10T04:19:13
| 2021-02-10T04:19:13
| 3,670,680
| 250
| 53
| null | 2016-09-17T14:12:51
| 2012-03-09T13:03:40
|
Brainfuck
|
UTF-8
|
Python
| false
| false
| 1,609
|
py
|
brainfuck-simple.py
|
import sys
import optparse
def precompute_jumps(program):
stack = []
ret = {}
pc = 0
while not pc == len(program):
opcode = program[pc]
if opcode == "[":
stack.append(pc)
elif opcode == "]":
target = stack.pop()
ret[target] = pc
ret[pc] = target
pc += 1
return ret
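# Illustrative example (a sketch, not part of the original script): for the
# program "[-]" the bracket at index 0 matches the bracket at index 2, so
# precompute_jumps returns {0: 2, 2: 0}, letting run() jump in O(1).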
def run(program):
buffer = [0]
jump_map = precompute_jumps(program)
ptr = 0
pc = 0
while not pc == len(program):
opcode = program[pc]
if opcode == ">":
ptr += 1
if ptr == len(buffer): buffer.append(0)
elif opcode == "<": ptr -= 1
elif opcode == "+": buffer[ptr] += 1
elif opcode == "-": buffer[ptr] -= 1
elif opcode == ".":
sys.stdout.write(chr(buffer[ptr]))
sys.stdout.flush()
elif opcode == ",":
buffer[ptr] = ord(sys.stdin.read(1))
elif opcode == "[":
if buffer[ptr] == 0: pc = jump_map[pc]
elif opcode == "]":
if buffer[ptr] != 0: pc = jump_map[pc]
pc += 1
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="Verbosity ON")
options, args = parser.parse_args()
if args:
with open(args[0], "r") as input_file:
contents = input_file.read()
else:
contents = sys.stdin.read()
    # filter() returns a lazy iterator on Python 3, so materialize it to allow
    # len() and indexing inside run().
    program = list(filter(lambda c: c in "<>-+[],.", contents))
run(program)
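    # Example usage (hypothetical file name):
    #   python brainfuck-simple.py hello.bf
    # or via stdin:
    #   echo '++++++++[>++++++++<-]>+.' | python brainfuck-simple.py   # prints 'A'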
|
b26ba00b02abf63c954569081343b0b580b191cd
|
fcde019e3a8188c56c691497e97cb4f6f135bfa9
|
/src/deeponto/align/evaluation.py
|
5de65d5fbe051e038572f8b9746a2024c9db013f
|
[
"Apache-2.0"
] |
permissive
|
KRR-Oxford/DeepOnto
|
4fab7b0c2e80388605391a026d2104d3eef8a05e
|
3e4fae7b1481b214b2e1c4ed51341446461d67dc
|
refs/heads/main
| 2023-08-16T19:52:16.415872
| 2023-08-10T20:36:20
| 2023-08-10T20:36:20
| 430,473,960
| 112
| 8
|
Apache-2.0
| 2023-07-08T15:08:28
| 2021-11-21T20:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,782
|
py
|
evaluation.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List
import math
from .mapping import *
class AlignmentEvaluator:
"""Class that provides evaluation metrics for alignment."""
def __init__(self):
pass
@staticmethod
def precision(prediction_mappings: List[EntityMapping], reference_mappings: List[ReferenceMapping]) -> float:
r"""The percentage of correct predictions.
$$P = \frac{|\mathcal{M}_{pred} \cap \mathcal{M}_{ref}|}{|\mathcal{M}_{pred}|}$$
"""
preds = [p.to_tuple() for p in prediction_mappings]
refs = [r.to_tuple() for r in reference_mappings]
return len(set(preds).intersection(set(refs))) / len(set(preds))
@staticmethod
def recall(prediction_mappings: List[EntityMapping], reference_mappings: List[ReferenceMapping]) -> float:
r"""The percentage of correct retrievals.
$$R = \frac{|\mathcal{M}_{pred} \cap \mathcal{M}_{ref}|}{|\mathcal{M}_{ref}|}$$
"""
preds = [p.to_tuple() for p in prediction_mappings]
refs = [r.to_tuple() for r in reference_mappings]
return len(set(preds).intersection(set(refs))) / len(set(refs))
@staticmethod
def f1(
prediction_mappings: List[EntityMapping],
reference_mappings: List[ReferenceMapping],
null_reference_mappings: List[ReferenceMapping] = [],
):
r"""Compute the F1 score given the prediction and reference mappings.
$$F_1 = \frac{2 P R}{P + R}$$
`null_reference_mappings` is an additional set whose elements
should be **ignored** in the calculation, i.e., **neither positive nor negative**.
        Specifically, $\mathcal{M}_{null}$ will be **subtracted** from both
        $\mathcal{M}_{pred}$ and $\mathcal{M}_{ref}$.
"""
preds = [p.to_tuple() for p in prediction_mappings]
refs = [r.to_tuple() for r in reference_mappings]
null_refs = [n.to_tuple() for n in null_reference_mappings]
# elements in the {null_set} are removed from both {pred} and {ref} (ignored)
if null_refs:
preds = set(preds) - set(null_refs)
refs = set(refs) - set(null_refs)
P = len(set(preds).intersection(set(refs))) / len(set(preds))
R = len(set(preds).intersection(set(refs))) / len(set(refs))
F1 = 2 * P * R / (P + R)
return {"P": round(P, 3), "R": round(R, 3), "F1": round(F1, 3)}
##################################################################################
### [Eval Case 2]: Hits@K & MRR ###
##################################################################################
@staticmethod
def hits_at_K(reference_and_candidates: List[Tuple[ReferenceMapping, List[EntityMapping]]], K: int):
r"""Compute $Hits@K$ for a list of `(reference_mapping, candidate_mappings)` pair.
It is computed as the number of a `reference_mapping` existed in the first $K$ ranked `candidate_mappings`,
divided by the total number of input pairs.
$$Hits@K = \sum_i^N \mathbb{I}_{rank_i \leq k} / N$$
"""
n_hits = 0
for pred, cands in reference_and_candidates:
ordered_candidates = [c.to_tuple() for c in EntityMapping.sort_entity_mappings_by_score(cands, k=K)]
if pred.to_tuple() in ordered_candidates:
n_hits += 1
return n_hits / len(reference_and_candidates)
@staticmethod
def mean_reciprocal_rank(reference_and_candidates: List[Tuple[ReferenceMapping, List[EntityMapping]]]):
r"""Compute $MRR$ for a list of `(reference_mapping, candidate_mappings)` pair.
$$MRR = \sum_i^N rank_i^{-1} / N$$
"""
sum_inverted_ranks = 0
for pred, cands in reference_and_candidates:
ordered_candidates = [c.to_tuple() for c in EntityMapping.sort_entity_mappings_by_score(cands)]
if pred.to_tuple() in ordered_candidates:
rank = ordered_candidates.index(pred.to_tuple()) + 1
else:
rank = math.inf
sum_inverted_ranks += 1 / rank
return sum_inverted_ranks / len(reference_and_candidates)
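    # Worked example (a sketch): with two references ranked 1st and 4th among their
    # candidates, MRR = (1/1 + 1/4) / 2 = 0.625, while Hits@3 = 1/2 because only the
    # first reference appears within the top 3 ranked candidates.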
|
84c7367f3672c76b5292f32aeb829fc94053136f
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-essential-contacts/google/cloud/essential_contacts_v1/services/essential_contacts_service/client.py
|
46aa5ab50ac937512b2d996eaa80b4366f55c074
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 48,971
|
py
|
client.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.essential_contacts_v1 import gapic_version as package_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.cloud.essential_contacts_v1.services.essential_contacts_service import (
pagers,
)
from google.cloud.essential_contacts_v1.types import enums, service
from .transports.base import DEFAULT_CLIENT_INFO, EssentialContactsServiceTransport
from .transports.grpc import EssentialContactsServiceGrpcTransport
from .transports.grpc_asyncio import EssentialContactsServiceGrpcAsyncIOTransport
from .transports.rest import EssentialContactsServiceRestTransport
class EssentialContactsServiceClientMeta(type):
"""Metaclass for the EssentialContactsService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[EssentialContactsServiceTransport]]
_transport_registry["grpc"] = EssentialContactsServiceGrpcTransport
_transport_registry["grpc_asyncio"] = EssentialContactsServiceGrpcAsyncIOTransport
_transport_registry["rest"] = EssentialContactsServiceRestTransport
def get_transport_class(
cls,
label: Optional[str] = None,
) -> Type[EssentialContactsServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class EssentialContactsServiceClient(metaclass=EssentialContactsServiceClientMeta):
"""Manages contacts for important Google Cloud notifications."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "essentialcontacts.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EssentialContactsServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EssentialContactsServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> EssentialContactsServiceTransport:
"""Returns the transport used by the client instance.
Returns:
EssentialContactsServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def contact_path(
project: str,
contact: str,
) -> str:
"""Returns a fully-qualified contact string."""
return "projects/{project}/contacts/{contact}".format(
project=project,
contact=contact,
)
@staticmethod
def parse_contact_path(path: str) -> Dict[str, str]:
"""Parses a contact path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/contacts/(?P<contact>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
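    # For instance (a sketch): with GOOGLE_API_USE_CLIENT_CERTIFICATE="true", a
    # default client cert source available, and GOOGLE_API_USE_MTLS_ENDPOINT left
    # at its default of "auto", this resolves to DEFAULT_MTLS_ENDPOINT
    # ("essentialcontacts.mtls.googleapis.com") together with that cert source.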
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Optional[Union[str, EssentialContactsServiceTransport]] = None,
client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the essential contacts service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, EssentialContactsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, EssentialContactsServiceTransport):
# transport is a EssentialContactsServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
api_audience=client_options.api_audience,
)
def create_contact(
self,
request: Optional[Union[service.CreateContactRequest, dict]] = None,
*,
parent: Optional[str] = None,
contact: Optional[service.Contact] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> service.Contact:
r"""Adds a new contact for a resource.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import essential_contacts_v1
def sample_create_contact():
# Create a client
client = essential_contacts_v1.EssentialContactsServiceClient()
# Initialize request argument(s)
contact = essential_contacts_v1.Contact()
contact.email = "email_value"
contact.notification_category_subscriptions = ['TECHNICAL_INCIDENTS']
contact.language_tag = "language_tag_value"
request = essential_contacts_v1.CreateContactRequest(
parent="parent_value",
contact=contact,
)
# Make the request
response = client.create_contact(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.essential_contacts_v1.types.CreateContactRequest, dict]):
The request object. Request message for the CreateContact
method.
parent (str):
Required. The resource to save this contact for. Format:
organizations/{organization_id}, folders/{folder_id} or
projects/{project_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
contact (google.cloud.essential_contacts_v1.types.Contact):
Required. The contact to create. Must
specify an email address and language
tag.
This corresponds to the ``contact`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.essential_contacts_v1.types.Contact:
A contact that will receive
notifications from Google Cloud.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, contact])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a service.CreateContactRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.CreateContactRequest):
request = service.CreateContactRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if contact is not None:
request.contact = contact
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_contact]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_contact(
self,
request: Optional[Union[service.UpdateContactRequest, dict]] = None,
*,
contact: Optional[service.Contact] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> service.Contact:
r"""Updates a contact.
Note: A contact's email address cannot be changed.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import essential_contacts_v1
def sample_update_contact():
# Create a client
client = essential_contacts_v1.EssentialContactsServiceClient()
# Initialize request argument(s)
contact = essential_contacts_v1.Contact()
contact.email = "email_value"
contact.notification_category_subscriptions = ['TECHNICAL_INCIDENTS']
contact.language_tag = "language_tag_value"
request = essential_contacts_v1.UpdateContactRequest(
contact=contact,
)
# Make the request
response = client.update_contact(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.essential_contacts_v1.types.UpdateContactRequest, dict]):
The request object. Request message for the UpdateContact
method.
contact (google.cloud.essential_contacts_v1.types.Contact):
Required. The contact resource to
replace the existing saved contact.
Note:
the email address of the contact cannot
be modified.
This corresponds to the ``contact`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. The update mask applied to the resource. For
the ``FieldMask`` definition, see
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.essential_contacts_v1.types.Contact:
A contact that will receive
notifications from Google Cloud.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([contact, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a service.UpdateContactRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.UpdateContactRequest):
request = service.UpdateContactRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if contact is not None:
request.contact = contact
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_contact]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("contact.name", request.contact.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_contacts(
self,
request: Optional[Union[service.ListContactsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListContactsPager:
r"""Lists the contacts that have been set on a resource.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import essential_contacts_v1
def sample_list_contacts():
# Create a client
client = essential_contacts_v1.EssentialContactsServiceClient()
# Initialize request argument(s)
request = essential_contacts_v1.ListContactsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_contacts(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.essential_contacts_v1.types.ListContactsRequest, dict]):
The request object. Request message for the ListContacts
method.
parent (str):
Required. The parent resource name. Format:
organizations/{organization_id}, folders/{folder_id} or
projects/{project_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.essential_contacts_v1.services.essential_contacts_service.pagers.ListContactsPager:
Response message for the ListContacts
method.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a service.ListContactsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.ListContactsRequest):
request = service.ListContactsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_contacts]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListContactsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_contact(
self,
request: Optional[Union[service.GetContactRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> service.Contact:
r"""Gets a single contact.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import essential_contacts_v1
def sample_get_contact():
# Create a client
client = essential_contacts_v1.EssentialContactsServiceClient()
# Initialize request argument(s)
request = essential_contacts_v1.GetContactRequest(
name="name_value",
)
# Make the request
response = client.get_contact(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.essential_contacts_v1.types.GetContactRequest, dict]):
The request object. Request message for the GetContact
method.
name (str):
Required. The name of the contact to retrieve. Format:
organizations/{organization_id}/contacts/{contact_id},
folders/{folder_id}/contacts/{contact_id} or
projects/{project_id}/contacts/{contact_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.essential_contacts_v1.types.Contact:
A contact that will receive
notifications from Google Cloud.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a service.GetContactRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.GetContactRequest):
request = service.GetContactRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_contact]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_contact(
self,
request: Optional[Union[service.DeleteContactRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a contact.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import essential_contacts_v1
def sample_delete_contact():
# Create a client
client = essential_contacts_v1.EssentialContactsServiceClient()
# Initialize request argument(s)
request = essential_contacts_v1.DeleteContactRequest(
name="name_value",
)
# Make the request
client.delete_contact(request=request)
Args:
request (Union[google.cloud.essential_contacts_v1.types.DeleteContactRequest, dict]):
The request object. Request message for the DeleteContact
method.
name (str):
Required. The name of the contact to delete. Format:
organizations/{organization_id}/contacts/{contact_id},
folders/{folder_id}/contacts/{contact_id} or
projects/{project_id}/contacts/{contact_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a service.DeleteContactRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.DeleteContactRequest):
request = service.DeleteContactRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_contact]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def compute_contacts(
self,
request: Optional[Union[service.ComputeContactsRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ComputeContactsPager:
r"""Lists all contacts for the resource that are
subscribed to the specified notification categories,
including contacts inherited from any parent resources.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import essential_contacts_v1
def sample_compute_contacts():
# Create a client
client = essential_contacts_v1.EssentialContactsServiceClient()
# Initialize request argument(s)
request = essential_contacts_v1.ComputeContactsRequest(
parent="parent_value",
)
# Make the request
page_result = client.compute_contacts(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.essential_contacts_v1.types.ComputeContactsRequest, dict]):
The request object. Request message for the
ComputeContacts method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.essential_contacts_v1.services.essential_contacts_service.pagers.ComputeContactsPager:
Response message for the
ComputeContacts method.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a service.ComputeContactsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.ComputeContactsRequest):
request = service.ComputeContactsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.compute_contacts]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ComputeContactsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def send_test_message(
self,
request: Optional[Union[service.SendTestMessageRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Allows a contact admin to send a test message to
contact to verify that it has been configured correctly.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import essential_contacts_v1
def sample_send_test_message():
# Create a client
client = essential_contacts_v1.EssentialContactsServiceClient()
# Initialize request argument(s)
request = essential_contacts_v1.SendTestMessageRequest(
contacts=['contacts_value1', 'contacts_value2'],
resource="resource_value",
notification_category="TECHNICAL_INCIDENTS",
)
# Make the request
client.send_test_message(request=request)
Args:
request (Union[google.cloud.essential_contacts_v1.types.SendTestMessageRequest, dict]):
The request object. Request message for the
SendTestMessage method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a service.SendTestMessageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.SendTestMessageRequest):
request = service.SendTestMessageRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.send_test_message]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def __enter__(self) -> "EssentialContactsServiceClient":
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=package_version.__version__
)
__all__ = ("EssentialContactsServiceClient",)
|
6c4f3ebd0095e774c92d774ea60f3411a1910282
|
0841643267b9fc1478f6e3d21bfccb17aba67af6
|
/gs_quant/test/markets/test_baskets.py
|
d6cf500883f9910efc89c90d365f260b436119d2
|
[
"Apache-2.0"
] |
permissive
|
goldmansachs/gs-quant
|
55618e0e4e961d4ee50b7393f27c258e2647a957
|
4cf8ec75c4d85b16ec08371c46cc1a9ede9d72a2
|
refs/heads/master
| 2023-08-20T00:55:43.324547
| 2023-08-16T16:55:22
| 2023-08-16T16:55:22
| 161,840,815
| 2,088
| 596
|
Apache-2.0
| 2023-08-16T16:55:23
| 2018-12-14T21:10:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 12,018
|
py
|
test_baskets.py
|
"""
Copyright 2018 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from typing import Dict
from unittest import mock
import datetime as dt
import pytest
from gs_quant.api.gs.assets import AssetClass, AssetType, GsAsset, GsAssetApi
from gs_quant.api.gs.indices import GsIndexApi
from gs_quant.api.gs.reports import GsReportApi
from gs_quant.api.gs.users import GsUsersApi
from gs_quant.entities.entitlements import User
from gs_quant.errors import MqError
from gs_quant.markets.baskets import Basket, ErrorMessage
from gs_quant.markets.indices_utils import ReturnType
from gs_quant.markets.position_set import Position, PositionSet
from gs_quant.session import GsSession, Environment
from gs_quant.target.common import Entitlements as TargetEntitlements, \
PositionSet as TargetPositionSet, Position as TargetPosition, ReportParameters, XRef
from gs_quant.target.indices import CustomBasketsResponse, CustomBasketRiskParams
from gs_quant.target.reports import Report, User as TargetUser
# Helper mock value constants
asset_1 = {'name': 'asset 1', 'id': 'id1', 'bbid': 'bbid1'}
asset_2 = {'name': 'asset 2', 'id': 'id2', 'bbid': 'bbid2'}
assets_data = [asset_1, asset_2]
base_user = {'name': 'First Last', 'email': 'ex@email.com', 'company': 'Company A'}
cb_response = CustomBasketsResponse('done', 'R1234567890', 'MA1234567890')
gs_asset = GsAsset(asset_class=AssetClass.Equity,
type_=AssetType.Custom_Basket,
name='Test Basket',
id_='MA1234567890',
entitlements=TargetEntitlements(admin=['guid:user_abc']),
xref=XRef(ticker='GSMBXXXX'))
initial_price = {'price': 100}
mqid = 'MA1234567890'
name = 'Test Basket'
positions = [Position('bbid1', asset_id='id1', quantity=100), Position('bbid2', asset_id='id2', quantity=200)]
positions_weighted = positions = [Position('bbid1', asset_id='id1', weight=0.4),
Position('bbid2', asset_id='id2', weight=0.6)]
position_set = PositionSet(positions, divisor=1000)
report = Report(mqid, 'asset', 'Basket Create', ReportParameters(), status='done')
resolved_asset = {'GSMBXXXX': [{'id': mqid}]}
target_positions = tuple([TargetPosition(asset_id='id1', quantity=100), TargetPosition(asset_id='id2', quantity=200)])
target_position_set = TargetPositionSet(target_positions, dt.date(2021, 1, 7), divisor=1000)
ticker = 'GSMBXXXX'
user_ea = {**base_user, 'id': 'user_abc', 'tokens': ['external', 'guid:user_abc']} # external, admin
user_ena = {**base_user, 'id': 'user_xyz', 'tokens': ['external', 'guid:user_xyz']} # external, non admin
user_ia = {**base_user, 'id': 'user_abc', 'tokens': ['internal', 'guid:user_abc']} # internal, admin
@mock.patch.object(GsSession.__class__, 'default_value')
def mock_session(mocker):
""" Mock GsSession helper """
mocker.return_value = GsSession.get(Environment.QA, 'client_id', 'secret')
def mock_response(mocker, mock_object, mock_fn, mock_response):
""" Mock patch helper """
if mock_response is not None:
mocker.patch.object(mock_object, mock_fn, return_value=mock_response)
def mock_basket_init(mocker, user: Dict, existing: bool = True):
""" Mock basket initialization helper """
if existing:
mock_response(mocker, GsAssetApi, 'resolve_assets', resolved_asset)
mock_response(mocker, GsAssetApi, 'get_asset', gs_asset)
mock_response(mocker, GsAssetApi, 'get_latest_positions', target_position_set)
mock_response(mocker, GsAssetApi, 'get_many_assets_data', assets_data)
mock_response(mocker, GsIndexApi, 'initial_price', initial_price)
mock_response(mocker, GsReportApi, 'get_reports', [report])
mock_response(mocker, GsUsersApi, 'get_users', [TargetUser.from_dict(user)])
mock_response(mocker, GsUsersApi, 'get_current_user_info', user)
def test_basket_error_messages(mocker):
mock_session()
# test non admin errors
mock_basket_init(mocker, user_ena)
basket = Basket.get(ticker)
with pytest.raises(MqError, match=ErrorMessage.NON_ADMIN.value):
basket.cancel_rebalance()
with pytest.raises(MqError, match=ErrorMessage.NON_ADMIN.value):
basket.allow_ca_restricted_assets = False
# test non internal errors
with pytest.raises(MqError, match=ErrorMessage.NON_INTERNAL.value):
basket.flagship = False
# test unmodifiable errors
with pytest.raises(MqError, match=ErrorMessage.UNMODIFIABLE.value):
basket.ticker = 'GSMBZZZZ'
# test uninitialized errors
mock_basket_init(mocker, user_ena, False)
basket = Basket()
with pytest.raises(MqError, match=ErrorMessage.UNINITIALIZED.value):
basket.clone()
with pytest.raises(MqError, match=ErrorMessage.UNINITIALIZED.value):
basket.get_latest_rebalance_data()
def test_basket_create(mocker):
mock_session()
mock_basket_init(mocker, user_ea, False)
mock_response(mocker, GsIndexApi, 'validate_ticker', True)
basket = Basket()
basket.name = name
basket.ticker = ticker
basket.position_set = position_set
basket.return_type = ReturnType.PRICE_RETURN
mock_response(mocker, GsIndexApi, 'create', cb_response)
mock_response(mocker, GsAssetApi, 'get_asset', gs_asset)
mock_response(mocker, GsReportApi, 'get_report', report)
mock_basket_init(mocker, user_ea)
response = basket.create()
GsIndexApi.create.assert_called()
assert response == cb_response.as_dict()
def test_basket_clone(mocker):
mock_session()
# test uninitialized errors
mock_basket_init(mocker, user_ea, False)
basket = Basket()
with pytest.raises(MqError, match=ErrorMessage.UNINITIALIZED.value):
basket.clone()
# test clone
mock_basket_init(mocker, user_ena)
parent_basket = Basket.get(ticker)
clone = parent_basket.clone()
mock_basket_init(mocker, user_ea, False)
parent_positions = [p.as_dict() for p in parent_basket.position_set.positions]
clone_positions = [p.as_dict() for p in clone.position_set.positions]
assert clone_positions == parent_positions
assert clone.clone_parent_id == mqid
assert clone.parent_basket == ticker
def test_basket_edit(mocker):
mock_session()
# test errors
mock_basket_init(mocker, user_ea, False)
basket = Basket()
with pytest.raises(MqError, match=ErrorMessage.UNINITIALIZED.value):
basket.update()
mock_basket_init(mocker, user_ena)
basket = Basket.get(ticker)
with pytest.raises(MqError, match=ErrorMessage.NON_ADMIN.value):
basket.update()
# test update
mock_basket_init(mocker, user_ia)
basket = Basket.get(ticker)
basket.description = 'New Basket Description'
gs_asset.description = 'New Basket Description'
mock_response(mocker, GsIndexApi, 'edit', cb_response)
mock_response(mocker, GsAssetApi, 'get_asset', gs_asset)
mock_response(mocker, GsReportApi, 'get_report', report)
mock_basket_init(mocker, user_ia)
response = basket.update()
GsIndexApi.edit.assert_called()
assert response == cb_response.as_dict()
assert basket.description == 'New Basket Description'
gs_asset.description = None
def test_basket_rebalance(mocker):
mock_session()
mock_basket_init(mocker, user_ia)
basket = Basket.get(ticker)
basket.allow_ca_restricted_assets = True
mock_response(mocker, GsIndexApi, 'rebalance', cb_response)
mock_response(mocker, GsAssetApi, 'get_asset', gs_asset)
mock_response(mocker, GsReportApi, 'get_report', report)
mock_basket_init(mocker, user_ia)
response = basket.update()
GsIndexApi.rebalance.assert_called()
assert response == cb_response.as_dict()
def test_basket_edit_and_rebalance(mocker):
mock_session()
mock_basket_init(mocker, user_ia)
basket = Basket.get(ticker)
basket.description = 'New Basket Description'
gs_asset.description = 'New Basket Description'
basket.allow_ca_restricted_assets = True
mock_response(mocker, GsIndexApi, 'edit', cb_response)
mock_response(mocker, GsReportApi, 'get_report', report)
mock_response(mocker, GsIndexApi, 'rebalance', cb_response)
mock_response(mocker, GsAssetApi, 'get_asset', gs_asset)
mock_response(mocker, GsReportApi, 'get_report', report)
mock_basket_init(mocker, user_ia)
response = basket.update()
GsIndexApi.edit.assert_called()
GsIndexApi.rebalance.assert_called()
assert response == cb_response.as_dict()
assert basket.description == 'New Basket Description'
gs_asset.description = None
def test_basket_update_entitlements(mocker):
mock_session()
mock_basket_init(mocker, user_ia)
basket = Basket.get(ticker)
mock_response(mocker, GsUsersApi, 'get_users', [TargetUser.from_dict(user_ena)])
new_admin = User.get(user_id='user_xyz')
basket.entitlements.admin.users += [new_admin]
entitlements_response = TargetEntitlements(admin=['guid:user_abc', 'guid:user_xyz'])
mock_response(mocker, GsAssetApi, 'update_asset_entitlements', entitlements_response)
response = basket.update()
GsAssetApi.update_asset_entitlements.assert_called()
assert response == entitlements_response
def test_upload_position_history(mocker):
mock_session()
# test errors
mock_basket_init(mocker, user_ea, False)
basket = Basket()
with pytest.raises(MqError, match=ErrorMessage.UNINITIALIZED.value):
basket.upload_position_history()
mock_basket_init(mocker, user_ena)
basket = Basket.get(ticker)
with pytest.raises(MqError, match=ErrorMessage.NON_ADMIN.value):
basket.upload_position_history()
# test backcast
mock_basket_init(mocker, user_ia)
basket = Basket.get(ticker)
pos_set_1 = PositionSet(positions_weighted, dt.date(2021, 1, 1))
pos_set_2 = PositionSet(positions_weighted, dt.date(2021, 3, 1))
pos_set_3 = PositionSet(positions_weighted, dt.date(2021, 5, 1))
mock_response(mocker, GsIndexApi, 'backcast', cb_response)
response = basket.upload_position_history([pos_set_1, pos_set_2, pos_set_3])
GsIndexApi.backcast.assert_called()
assert response == cb_response.as_dict()
def test_update_risk_reports(mocker):
mock_session()
# test errors
mock_basket_init(mocker, user_ea, False)
basket = Basket()
with pytest.raises(MqError, match=ErrorMessage.UNINITIALIZED.value):
basket.add_factor_risk_report('AXUS4M', False)
with pytest.raises(MqError, match=ErrorMessage.UNINITIALIZED.value):
basket.delete_factor_risk_report('AXUS4M')
mock_basket_init(mocker, user_ena)
basket = Basket.get(ticker)
with pytest.raises(MqError, match=ErrorMessage.NON_ADMIN.value):
basket.add_factor_risk_report('AXUS4M', False)
with pytest.raises(MqError, match=ErrorMessage.NON_ADMIN.value):
basket.delete_factor_risk_report('AXUS4M')
# test add/delete factor risk reports
mock_basket_init(mocker, user_ea)
basket = Basket.get(ticker)
mock_response(mocker, GsIndexApi, 'update_risk_reports', {})
basket.add_factor_risk_report('AXUS4M', False)
payload = CustomBasketRiskParams(risk_model='AXUS4M', fx_hedged=False)
GsIndexApi.update_risk_reports.assert_called_with(payload)
mock_response(mocker, GsIndexApi, 'update_risk_reports', {})
basket.delete_factor_risk_report('AXUS4M')
payload = CustomBasketRiskParams(risk_model='AXUS4M', delete=True)
GsIndexApi.update_risk_reports.assert_called_with(payload)
|
aea523ba76e88692e0b109c746bc53d1e3369a35
|
fce81b804cae23f525a5ad4370b684bf0dc531a5
|
/numpy/lib/tests/test__version.py
|
e6d41ad939323792d31faa7ae517e6835ea851d1
|
[
"Zlib",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
numpy/numpy
|
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
|
dc2ff125493777a1084044e6cd6857a42ee323d4
|
refs/heads/main
| 2023-09-05T10:10:52.767363
| 2023-09-04T18:03:29
| 2023-09-04T18:03:29
| 908,607
| 25,725
| 11,968
|
BSD-3-Clause
| 2023-09-14T21:26:09
| 2010-09-13T23:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
test__version.py
|
"""Tests for the NumpyVersion class.
"""
from numpy.testing import assert_, assert_raises
from numpy.lib import NumpyVersion
def test_main_versions():
assert_(NumpyVersion('1.8.0') == '1.8.0')
for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']:
assert_(NumpyVersion('1.8.0') < ver)
for ver in ['1.7.0', '1.7.1', '0.9.9']:
assert_(NumpyVersion('1.8.0') > ver)
def test_version_1_point_10():
# regression test for gh-2998.
assert_(NumpyVersion('1.9.0') < '1.10.0')
assert_(NumpyVersion('1.11.0') < '1.11.1')
assert_(NumpyVersion('1.11.0') == '1.11.0')
assert_(NumpyVersion('1.99.11') < '1.99.12')
def test_alpha_beta_rc():
assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
for ver in ['1.8.0', '1.8.0rc2']:
assert_(NumpyVersion('1.8.0rc1') < ver)
for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
assert_(NumpyVersion('1.8.0rc1') > ver)
assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
def test_dev_version():
assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
def test_dev_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
def test_dev0_version():
assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
def test_dev0_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
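# Illustrative usage sketch (not part of the test suite): NumpyVersion is
# typically used to gate code paths on the installed NumPy version, e.g.
#
#   from numpy.lib import NumpyVersion
#   import numpy as np
#   if NumpyVersion(np.__version__) < '1.8.0':
#       raise RuntimeError('numpy >= 1.8.0 is required')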
|
ac66ef5c211bab515c69c84bfd9ee56212f81ffa
|
7260860cc391503e839929d77722004d17e47681
|
/django_dynamic_fixture/tests/test_ddf.py
|
5adadac2105a8c4300cebc2a9c40760b2ceb25e4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
paulocheque/django-dynamic-fixture
|
5937e144a4504ab84e05da673c7bf3ce90ad9d1e
|
5b5ffaca3bef3640feb2031feb3e895971b3b939
|
refs/heads/master
| 2023-08-30T20:56:47.857000
| 2023-08-27T01:29:16
| 2023-08-27T01:29:16
| 3,934,109
| 212
| 47
|
NOASSERTION
| 2023-09-05T20:29:14
| 2012-04-04T22:54:41
|
Python
|
UTF-8
|
Python
| false
| false
| 22,409
|
py
|
test_ddf.py
|
from datetime import datetime, date
from decimal import Decimal
import uuid
from django.test import TestCase
import pytest
from django_dynamic_fixture.models_test import *
from django_dynamic_fixture.ddf import *
from django_dynamic_fixture.fixture_algorithms.sequential_fixture import SequentialDataFixture
data_fixture = SequentialDataFixture()
class DDFTestCase(TestCase):
def setUp(self):
self.ddf = DynamicFixture(data_fixture)
class NewCreateAModelInstanceTest(DDFTestCase):
def test_new_create_a_non_saved_instance_of_the_model(self):
instance = self.ddf.new(EmptyModel)
assert isinstance(instance, EmptyModel)
assert instance.id is None
class GetDealWithPrimaryKeyTest(DDFTestCase):
def test_get_use_database_id_by_default(self):
instance = self.ddf.get(EmptyModel)
assert instance.id is not None
assert instance.pk is not None
def test_get_use_given_id(self):
instance = self.ddf.new(EmptyModel, id=99998)
assert instance.id == 99998
assert instance.pk == 99998
def test_get_use_given_named_id(self):
instance = self.ddf.get(ModelWithNamedPrimaryKey, named_pk=99998)
assert instance.named_pk == 99998
assert instance.pk == 99998
class NewFullFillAttributesWithAutoDataTest(DDFTestCase):
def test_new_fill_number_fields_with_numbers(self):
instance = self.ddf.new(ModelWithNumbers)
assert isinstance(instance.integer, int)
assert isinstance(instance.smallinteger, int)
assert isinstance(instance.positiveinteger, int)
assert isinstance(instance.positivesmallinteger, int)
assert isinstance(instance.biginteger, int)
assert isinstance(instance.float, float)
def test_new_fill_string_fields_with_text_type_strings(self):
instance = self.ddf.new(ModelWithStrings)
assert isinstance(instance.string, str)
assert isinstance(instance.text, str)
assert isinstance(instance.slug, str)
assert isinstance(instance.commaseparated, str)
def test_new_fill_boolean_fields_with_False_and_None(self):
instance = self.ddf.new(ModelWithBooleans)
assert instance.boolean is False
assert instance.nullboolean is None
def test_new_fill_time_related_fields_with_current_values(self):
instance = self.ddf.new(ModelWithDateTimes)
assert date.today() >= instance.date
assert datetime.now().time() >= instance.time
assert datetime.now() >= instance.datetime
def test_new_fill_formatted_strings_fields_with_basic_values(self):
instance = self.ddf.new(ModelWithFieldsWithCustomValidation)
assert isinstance(instance.email, str)
assert isinstance(instance.url, str)
assert isinstance(instance.ip, str)
assert isinstance(instance.ipv6, str)
def test_new_fill_file_fields_with_basic_strings(self):
instance = self.ddf.new(ModelWithFileFields)
assert isinstance(instance.filepath, str)
assert isinstance(instance.file.path, str)
try:
            from PIL import Image  # noqa: F401 (Pillow's module is `PIL`; plain `import pil` always failed)
# just test it if the PIL package is installed
assert isinstance(instance.image, str)
except ImportError:
pass
def test_new_fill_binary_fields_with_basic_data(self):
value = b'\x00\x46\xFE'
instance = self.ddf.new(ModelWithBinary, binary=value)
assert bytes(instance.binary) == bytes(value)
instance = self.ddf.get(ModelWithBinary)
assert isinstance(instance.binary, bytes), type(instance.binary)
class NewFullFillAttributesWithDefaultDataTest(DDFTestCase):
def test_fill_field_with_default_data(self):
instance = self.ddf.new(ModelWithDefaultValues)
assert instance.integer_with_default == 3
def test_fill_field_with_possible_choices(self):
instance = self.ddf.new(ModelWithDefaultValues)
assert instance.string_with_choices == 'a'
def test_fill_field_with_default_value_even_if_field_is_foreign_key(self):
instance = self.ddf.new(ModelWithDefaultValues)
assert instance.foreign_key_with_default is None
def test_fill_field_with_default_data_and_choices_must_consider_default_data_instead_choices(self):
instance = self.ddf.new(ModelWithDefaultValues)
assert instance.string_with_choices_and_default == 'b'
def test_fill_field_with_possible_optgroup_choices(self):
instance = self.ddf.new(ModelWithDefaultValues)
assert instance.string_with_optgroup_choices == 'a'
class NewFullFillAttributesWithCustomDataTest(DDFTestCase):
def test_fields_are_filled_with_custom_attributes(self):
assert self.ddf.new(ModelWithNumbers, integer=9).integer == 9
assert self.ddf.new(ModelWithStrings, string='7').string == '7'
assert self.ddf.new(ModelWithBooleans, boolean=True).boolean
def test_decimal_can_be_filled_by_an_string(self):
self.ddf.get(ModelWithNumbers, decimal='9.5')
assert ModelWithNumbers.objects.latest('id').decimal == Decimal('9.5')
def test_fields_can_be_filled_by_functions(self):
instance = self.ddf.new(ModelWithStrings, string=lambda field: field.name)
assert instance.string == 'string'
def test_invalid_configuration_raise_an_error(self):
with pytest.raises(InvalidConfigurationError):
self.ddf.new(ModelWithNumbers, integer=lambda x: ''.invalidmethod())
def test_bad_data_raise_an_error(self):
self.ddf.get(ModelWithNumbers, integer=50000)
with pytest.raises(BadDataError):
self.ddf.get(ModelWithNumbers, integer=50000)
class NewIgnoringNullableFieldsTest(DDFTestCase):
def test_new_do_not_fill_nullable_fields_if_we_do_not_want_to(self):
self.ddf = DynamicFixture(data_fixture, fill_nullable_fields=False)
instance = self.ddf.new(ModelForNullable)
assert instance.not_nullable is not None
assert instance.nullable is None
class NewIgnoreFieldsInIgnoreListTest(DDFTestCase):
def test_new_do_not_fill_ignored_fields(self):
self.ddf = DynamicFixture(data_fixture, ignore_fields=['not_required', 'not_required_with_default'])
instance = self.ddf.new(ModelForIgnoreList)
assert instance.not_required is None
assert instance.not_required_with_default is not None
# not ignored fields
assert instance.required is not None
assert instance.required_with_default is not None
def test_get_raise_an_error_if_a_required_field_is_in_ignore_list(self):
self.ddf = DynamicFixture(data_fixture, ignore_fields=['required', 'required_with_default'])
with pytest.raises(BadDataError):
self.ddf.get(ModelForIgnoreList)
def test_ignore_fields_are_propagated_to_self_references(self):
self.ddf = DynamicFixture(data_fixture, ignore_fields=['not_required'], fk_min_depth=1, not_required=10)
instance = self.ddf.new(ModelForIgnoreList)
assert instance.not_required == 10
assert instance.self_reference is not None
assert instance.self_reference.not_required is None
def test_ignore_fields_are_not_propagated_to_different_references(self):
self.ddf = DynamicFixture(data_fixture, ignore_fields=['non_nullable'], different_reference=DynamicFixture(data_fixture))
instance = self.ddf.new(ModelForIgnoreList)
assert instance.different_reference is not None
assert instance.different_reference.non_nullable is not None
def test_ignore_fields_are_not_ignored_if_explicitely_given(self):
self.ddf = DynamicFixture(data_fixture, not_required=3, ignore_fields=['not_required', 'nullable'])
instance = self.ddf.new(ModelForIgnoreList)
assert instance.not_required == 3
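# Summary of the ignore_fields semantics exercised above: ignored names
# propagate to self-references of the same model but not to other FK targets,
# and an ignored field is still filled when a value is passed explicitly.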
class NewAlsoCreatesRelatedObjectsTest(DDFTestCase):
def test_new_fill_foreignkey_fields(self):
instance = self.ddf.new(ModelWithRelationships)
assert isinstance(instance.foreignkey, ModelRelated)
def test_new_fill_onetoone_fields(self):
instance = self.ddf.new(ModelWithRelationships)
assert isinstance(instance.onetoone, ModelRelated)
def test_new_deal_with_default_values(self):
instance = self.ddf.new(ModelWithRelationships)
assert isinstance(instance.foreignkey_with_default, ModelRelated), str(type(instance.foreignkey_with_default))
def test_new_deal_with_id_default_values(self):
instance = self.ddf.new(ModelWithRelationships)
assert isinstance(instance.foreignkey_with_id_default, ModelRelated), str(type(instance.foreignkey_with_default))
# TODO
# def test_new_fill_genericrelations_fields(self):
# instance = self.ddf.new(ModelWithRelationships)
# assert isinstance(instance.foreignkey, ModelRelated)
class NewCanCreatesCustomizedRelatedObjectsTest(DDFTestCase):
def test_customizing_nullable_fields_for_related_objects(self):
instance = self.ddf.new(ModelWithRelationships, selfforeignkey=DynamicFixture(data_fixture, fill_nullable_fields=True))
assert instance.integer is None
assert isinstance(instance.selfforeignkey.integer, int)
class NewDealWithSelfReferencesTest(DDFTestCase):
def test_new_create_by_default_no_self_fks(self):
instance = self.ddf.new(ModelWithRelationships, fill_nullable_fields=False)
assert instance.selfforeignkey is None # no cycle
instance = self.ddf.new(ModelWithRelationships, fill_nullable_fields=True)
assert instance.selfforeignkey is None # no cycle
def test_new_create_only_1_lap_in_cycle(self):
self.ddf = DynamicFixture(data_fixture, fk_min_depth=1)
instance = self.ddf.new(ModelWithRelationships)
assert instance.selfforeignkey is not None # 1 cycle
assert instance.selfforeignkey.selfforeignkey is None # 2 cycles
def test_new_create_with_min_depth_2(self):
self.ddf = DynamicFixture(data_fixture, fk_min_depth=2)
instance = self.ddf.new(ModelWithRelationships)
assert instance.selfforeignkey is not None # 1 cycle
assert instance.selfforeignkey.selfforeignkey is not None # 2 cycles
assert instance.selfforeignkey.selfforeignkey.selfforeignkey is None # 3 cycles
def test_number_of_fk_cycles_does_not_break_default_non_null_fk(self):
self.ddf = DynamicFixture(data_fixture, fk_min_depth=0)
instance = self.ddf.new(ModelWithRefToParent)
assert instance.parent is not None
class GetFullFilledModelInstanceAndPersistTest(DDFTestCase):
def test_get_create_and_save_a_full_filled_instance_of_the_model(self):
instance = self.ddf.get(ModelWithRelationships)
assert isinstance(instance, ModelWithRelationships)
assert instance.id is not None
# checking unique problems
another_instance = self.ddf.get(ModelWithRelationships)
assert isinstance(another_instance, ModelWithRelationships)
assert another_instance.id is not None
def test_get_create_and_save_related_fields(self):
instance = self.ddf.get(ModelWithRelationships)
assert instance.selfforeignkey is None
assert instance.foreignkey is not None
assert instance.onetoone is not None
self.ddf = DynamicFixture(data_fixture, fk_min_depth=1)
instance = self.ddf.get(ModelWithRelationships)
assert instance.selfforeignkey is not None
class ManyToManyRelationshipTest(DDFTestCase):
def test_new_ignore_many_to_many_configuratios(self):
instance = self.ddf.new(ModelWithRelationships, manytomany=3)
instance.save()
assert instance.manytomany.all().count() == 0
def test_get_ignore_many_to_many_configuratios(self):
instance = self.ddf.get(ModelWithRelationships, manytomany=3)
assert instance.manytomany.all().count() == 3
def test_many_to_many_configuratios_accept_list_of_dynamic_filters(self):
instance = self.ddf.get(ModelWithRelationships, manytomany=[DynamicFixture(data_fixture, integer=1000), DynamicFixture(data_fixture, integer=1001)])
assert instance.manytomany.all().count() == 2
assert instance.manytomany.all()[0].integer == 1000
assert instance.manytomany.all()[1].integer == 1001
def test_many_to_many_configuratios_accept_list_of_instances(self):
b1 = self.ddf.get(ModelRelated, integer=1000)
b2 = self.ddf.get(ModelRelated, integer=1001)
instance = self.ddf.get(ModelWithRelationships, manytomany=[b1, b2])
assert instance.manytomany.all().count() == 2
objs = instance.manytomany.all().order_by('integer')
assert objs[0].integer == 1000
assert objs[1].integer == 1001
def test_invalid_many_to_many_configuration(self):
with pytest.raises(InvalidManyToManyConfigurationError):
self.ddf.get(ModelWithRelationships, manytomany='a')
def test_many_to_many_through(self):
b1 = self.ddf.get(ModelRelated, integer=1000)
b2 = self.ddf.get(ModelRelated, integer=1001)
instance = self.ddf.get(ModelWithRelationships, manytomany_through=[b1, b2])
objs = instance.manytomany_through.all().order_by('integer')
assert objs.count() == 2
assert objs[0].integer == 1000
assert objs[1].integer == 1001
class NewDealWithCyclicDependenciesTest(DDFTestCase):
def test_new_create_by_default_no_cycles(self):
a = self.ddf.new(ModelWithCyclicDependency)
assert a.model_b is None
def test_new_create_only_1_lap_in_fk_cycle(self):
self.ddf = DynamicFixture(data_fixture, fk_min_depth=1)
a = self.ddf.get(ModelWithCyclicDependency)
assert a.model_b.model_a is None
def test_new_create_with_min_depth_2(self):
self.ddf = DynamicFixture(data_fixture, fk_min_depth=2)
a = self.ddf.get(ModelWithCyclicDependency)
assert a.model_b.model_a.model_b is None
class NewDealWithInheritanceTest(DDFTestCase):
@pytest.mark.skipif(django.VERSION > (3, 2), reason="Not supported on Django 3.2+")
def test_new_must_not_raise_an_error_if_model_is_abstract(self):
        self.ddf.new(ModelAbstract)  # it does not raise an exception
def test_get_must_raise_an_error_if_model_is_abstract(self):
with pytest.raises(InvalidModelError):
self.ddf.get(ModelAbstract)
def test_get_must_fill_parent_fields_too(self):
instance = self.ddf.get(ModelParent)
assert isinstance(instance.integer, int)
assert ModelParent.objects.count() == 1
def test_get_must_fill_grandparent_fields_too(self):
instance = self.ddf.get(ModelChild)
assert isinstance(instance.integer, int)
assert ModelParent.objects.count() == 1
assert ModelChild.objects.count() == 1
def test_get_must_ignore_parent_link_attributes_but_the_parent_object_must_be_created(self):
instance = self.ddf.get(ModelChildWithCustomParentLink)
assert isinstance(instance.integer, int)
assert ModelParent.objects.count() == 1
assert ModelChildWithCustomParentLink.objects.count() == 1
assert instance.my_custom_ref.id is not None
assert instance.my_custom_ref.my_custom_ref_x.id is not None
# TODO: need to check these tests. Here we are trying to simulate a bug with parent_link attribute
def test_get_0(self):
instance = self.ddf.get(ModelWithRefToParent)
assert ModelWithRefToParent.objects.count() == 1
assert ModelParent.objects.count() == 1
assert isinstance(instance.parent, ModelParent)
def test_get_1(self):
instance = self.ddf.get(ModelWithRefToParent, parent=self.ddf.get(ModelChild))
assert ModelWithRefToParent.objects.count() == 1
assert ModelParent.objects.count() == 1
assert ModelChild.objects.count() == 1
assert isinstance(instance.parent, ModelChild)
def test_get_2(self):
instance = self.ddf.get(ModelWithRefToParent, parent=self.ddf.get(ModelChildWithCustomParentLink))
assert ModelWithRefToParent.objects.count() == 1
assert ModelParent.objects.count() == 1
assert ModelChildWithCustomParentLink.objects.count() == 1
assert isinstance(instance.parent, ModelChildWithCustomParentLink)
class ComplexFieldsTest(DDFTestCase):
def test_x(self):
instance = self.ddf.new(ModelForUUID)
assert isinstance(instance.uuid, uuid.UUID)
class ModelValidatorsTest(DDFTestCase):
def test_it_must_create_if_validation_is_disabled(self):
instance = self.ddf.get(ModelWithValidators, field_validator='nok', clean_validator='nok')
self.ddf.validate_models = False
assert instance.field_validator == 'nok'
assert instance.clean_validator == 'nok'
def test_it_must_create_if_there_is_no_validation_errors(self):
instance = self.ddf.get(ModelWithValidators, field_validator='ok', clean_validator='ok')
self.ddf.validate_models = True
assert instance.field_validator == 'ok'
assert instance.clean_validator == 'ok'
def test_it_must_raise_a_bad_data_error_if_data_is_not_valid(self):
self.ddf.validate_models = True
self.ddf.get(ModelWithValidators, field_validator='nok', clean_validator='ok')
with pytest.raises(BadDataError):
self.ddf.get(ModelWithValidators, field_validator='ok', clean_validator='nok')
class ConfigurationValidatorTest(DDFTestCase):
def test_it_must_raise_a_bad_data_error_if_data_is_not_valid(self):
with pytest.raises(InvalidConfigurationError):
self.ddf.get(EmptyModel, unexistent_field='x')
class DisableAutoGeneratedDateTimesTest(DDFTestCase):
def test_auto_generated_datetimes_must_be_respected_if_nothing_is_specified(self):
instance = self.ddf.get(ModelWithAutoDateTimes)
assert datetime.today().date() == instance.auto_now_add
assert datetime.today().date() == instance.auto_now
def test_it_must_ignore_auto_generated_datetime_if_a_custom_value_is_provided(self):
instance = self.ddf.get(ModelWithAutoDateTimes, auto_now_add=date(2000, 12, 31))
assert instance.auto_now_add == date(2000, 12, 31)
instance = self.ddf.get(ModelWithAutoDateTimes, auto_now=date(2000, 12, 31))
assert instance.auto_now == date(2000, 12, 31)
def test_checking_if_implementation_works_for_m2m_fields_too(self):
instance = self.ddf.get(ModelWithAutoDateTimes, manytomany=[DynamicFixture(data_fixture, auto_now_add=date(2000, 12, 31))])
assert instance.manytomany.all()[0].auto_now_add == date(2000, 12, 31)
instance = self.ddf.get(ModelWithAutoDateTimes, manytomany=[DynamicFixture(data_fixture, auto_now=date(2000, 12, 31))])
assert instance.manytomany.all()[0].auto_now == date(2000, 12, 31)
class ModelWithCustomValidationTest(DDFTestCase):
def test_ddf_can_not_create_instance_of_models_with_custom_validations(self):
self.ddf.validate_models = True
with pytest.raises(BadDataError):
self.ddf.get(ModelWithClean)
self.ddf.get(ModelWithClean, integer=9999) # this does not raise an exception
class ExceptionsLayoutMessagesTest(DDFTestCase):
def test_UnsupportedFieldError(self):
try:
self.ddf.new(ModelWithUnsupportedField)
self.fail()
except UnsupportedFieldError as e:
assert """django_dynamic_fixture.models_test.ModelWithUnsupportedField.z""" in str(e)
def test_BadDataError(self):
self.ddf = DynamicFixture(data_fixture, ignore_fields=['required', 'required_with_default'])
try:
self.ddf.get(ModelForIgnoreList)
self.fail()
except BadDataError as e:
assert 'IntegrityError' in str(e), str(e)
assert 'NULL' in str(e).upper(), str(e)
def test_InvalidConfigurationError(self):
try:
self.ddf.new(ModelWithNumbers, integer=lambda x: ''.invalidmethod())
self.fail()
except InvalidConfigurationError as e:
assert 'django_dynamic_fixture.models_test.ModelWithNumbers.integer' in str(e)
assert 'AttributeError' in str(e)
assert 'invalidmethod' in str(e)
def test_InvalidManyToManyConfigurationError(self):
try:
self.ddf.get(ModelWithRelationships, manytomany='a')
self.fail()
except InvalidManyToManyConfigurationError as e:
assert """('Field: manytomany', 'a')""" == str(e)
def test_InvalidModelError(self):
try:
self.ddf.get(ModelAbstract)
self.fail()
except InvalidModelError as e:
assert """django_dynamic_fixture.models_test.ModelAbstract""" == str(e)
def test_InvalidModelError_for_common_object(self):
class MyClass: pass
try:
self.ddf.new(MyClass)
self.fail()
except InvalidModelError as e:
assert """django_dynamic_fixture.tests.test_ddf.MyClass""" == str(e)
class SanityTest(DDFTestCase):
def test_create_lots_of_models_to_verify_data_unicity_errors(self):
for i in range(1000):
self.ddf.get(ModelWithNumbers)
class AvoidNameCollisionTest(DDFTestCase):
def test_avoid_common_name_instance(self):
self.ddf = DynamicFixture(data_fixture, fill_nullable_fields=False)
instance = self.ddf.new(ModelWithCommonNames)
assert instance.instance is not None
instance = self.ddf.new(ModelWithCommonNames, instance=3)
assert instance.instance == 3
instance = self.ddf.get(ModelWithCommonNames)
assert instance.instance is not None
instance = self.ddf.get(ModelWithCommonNames, instance=4)
assert instance.instance == 4
def test_avoid_common_name_field(self):
self.ddf = DynamicFixture(data_fixture, fill_nullable_fields=False)
instance = self.ddf.new(ModelWithCommonNames)
assert instance.field is not None
instance = self.ddf.new(ModelWithCommonNames, field=5)
assert instance.field == 5
instance = self.ddf.get(ModelWithCommonNames)
assert instance.field is not None
instance = self.ddf.get(ModelWithCommonNames, field=6)
assert instance.field == 6
|
9619d7c49bb13e82d672ac34a8657e2ded6b629c
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/subscriptions/constants.py
|
d46514a1fe4adebd85cbb2fa6333989a1d7188c6
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
constants.py
|
"""Constants for subscriptions."""
from django.utils.translation import gettext_lazy as _
# Days after the subscription has ended to disable the organization
DISABLE_AFTER_DAYS = 30
TYPE_CNAME = "cname"
TYPE_CDN = "cdn"
TYPE_SSL = "ssl"
TYPE_SUPPORT = "support"
TYPE_PRIVATE_DOCS = "private_docs"
TYPE_EMBED_API = "embed_api"
TYPE_SEARCH_ANALYTICS = "search_analytics"
TYPE_PAGEVIEW_ANALYTICS = "pageviews_analytics"
TYPE_CONCURRENT_BUILDS = "concurrent_builds"
TYPE_SSO = "sso"
TYPE_CUSTOM_URL = "urls"
TYPE_AUDIT_LOGS = "audit-logs"
TYPE_AUDIT_PAGEVIEWS = "audit-pageviews"
FEATURE_TYPES = (
(TYPE_CNAME, _("Custom domain")),
(TYPE_CDN, _("CDN public documentation")),
(TYPE_SSL, _("Custom SSL configuration")),
(TYPE_SUPPORT, _("Support SLA")),
(TYPE_PRIVATE_DOCS, _("Private documentation")),
(TYPE_EMBED_API, _("Embed content via API")),
(TYPE_SEARCH_ANALYTICS, _("Search analytics")),
(TYPE_PAGEVIEW_ANALYTICS, _("Pageview analytics")),
(TYPE_CONCURRENT_BUILDS, _("Concurrent builds")),
(TYPE_SSO, _("Single sign on (SSO) with Google")),
(TYPE_CUSTOM_URL, _("Custom URLs")),
(TYPE_AUDIT_LOGS, _("Audit logs")),
(TYPE_AUDIT_PAGEVIEWS, _("Audit logs for every page view")),
)
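# Usage sketch (illustrative; the model field below is hypothetical): these
# pairs are shaped for Django's ``choices`` option, e.g.
#
#   feature_type = models.CharField(_("Feature type"), max_length=32, choices=FEATURE_TYPES)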
|
11b2d6f98de53feba913c2705fa8950bfb6e6901
|
d3c7657f98fdffd6000218643704bfec003b8b0c
|
/pgmpy/tests/test_global_vars.py
|
1203c1b9fc07758a79243ea261ef0e96c9339992
|
[
"MIT"
] |
permissive
|
pgmpy/pgmpy
|
25066e623576b8346647eae34bb0248b6bfd1033
|
6d66bde4c7f140ba14892174c59370b2b7964e90
|
refs/heads/dev
| 2023-09-04T15:31:47.379242
| 2023-09-01T17:35:50
| 2023-09-01T17:35:50
| 12,968,651
| 2,617
| 854
|
MIT
| 2023-09-14T14:11:57
| 2013-09-20T08:18:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
test_global_vars.py
|
import unittest
import numpy as np
import torch
from pgmpy import config
class TestConfig(unittest.TestCase):
def test_defaults(self):
self.assertEqual(config.BACKEND, "numpy")
self.assertEqual(config.get_backend(), "numpy")
self.assertEqual(config.DTYPE, "float64")
self.assertEqual(config.get_dtype(), "float64")
self.assertEqual(config.DEVICE, None)
self.assertEqual(config.get_device(), None)
self.assertEqual(config.SHOW_PROGRESS, True)
self.assertEqual(config.get_show_progress(), True)
def test_torch_cpu(self):
config.set_backend(backend="torch", device="cpu", dtype=torch.float32)
self.assertEqual(config.BACKEND, "torch")
self.assertEqual(config.get_backend(), "torch")
self.assertEqual(config.DTYPE, torch.float32)
self.assertEqual(config.get_dtype(), torch.float32)
self.assertEqual(config.DEVICE, torch.device("cpu"))
self.assertEqual(config.get_device(), torch.device("cpu"))
self.assertEqual(config.SHOW_PROGRESS, True)
self.assertEqual(config.get_show_progress(), True)
@unittest.skipIf(not torch.cuda.is_available(), "No GPU")
def test_torch_gpu(self):
config.set_backend(backend="torch", device="cuda", dtype=torch.float32)
self.assertEqual(config.BACKEND, "torch")
self.assertEqual(config.get_backend(), "torch")
self.assertEqual(config.DTYPE, torch.float32)
self.assertEqual(config.get_dtype(), torch.float32)
self.assertEqual(config.DEVICE, torch.device("cuda"))
self.assertEqual(config.get_device(), torch.device("cuda"))
self.assertEqual(config.SHOW_PROGRESS, True)
self.assertEqual(config.get_show_progress(), True)
def test_no_progress(self):
config.set_show_progress(show_progress=False)
self.assertEqual(config.BACKEND, "numpy")
self.assertEqual(config.get_backend(), "numpy")
self.assertEqual(config.DTYPE, "float64")
self.assertEqual(config.get_dtype(), "float64")
self.assertEqual(config.DEVICE, None)
self.assertEqual(config.get_device(), None)
self.assertEqual(config.SHOW_PROGRESS, False)
self.assertEqual(config.get_show_progress(), False)
def tearDown(self):
config.set_backend("numpy")
config.set_show_progress(show_progress=True)
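# Note: pgmpy's config object is process-global state, so tearDown above
# restores the numpy backend and progress defaults after every test;
# otherwise test_torch_cpu would leak its torch backend into later cases.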
|
a38ad1ab24d847229f4e11a16a43c698706c4994
|
d32b0c51323e4a2106f82d3fdc81094fb03f989e
|
/guillotina/utils/content.py
|
cf2a2cd4754a69d2df08d23e6c2c06882188ff0c
|
[
"BSD-2-Clause"
] |
permissive
|
plone/guillotina
|
69f768bddc53b397471c3748eee6f01e7db619ab
|
9085dd8b788a5f081db5d799965d39831b2d4ee2
|
refs/heads/master
| 2023-05-01T13:23:30.323625
| 2022-12-12T14:47:56
| 2022-12-12T14:47:56
| 83,574,500
| 185
| 61
|
NOASSERTION
| 2023-04-16T11:58:58
| 2017-03-01T16:12:44
|
Python
|
UTF-8
|
Python
| false
| false
| 8,847
|
py
|
content.py
|
from .misc import get_current_request
from .misc import list_or_dict_items
from guillotina import glogging
from guillotina import task_vars
from guillotina._settings import app_settings
from guillotina.component import get_adapter
from guillotina.component import get_utility
from guillotina.component import query_multi_adapter
from guillotina.const import TRASHED_ID
from guillotina.db.interfaces import IDatabaseManager
from guillotina.db.orm.interfaces import IBaseObject
from guillotina.exceptions import DatabaseNotFound
from guillotina.interfaces import IAbsoluteURL
from guillotina.interfaces import IApplication
from guillotina.interfaces import IAsyncContainer
from guillotina.interfaces import IContainer
from guillotina.interfaces import IDatabase
from guillotina.interfaces import IPrincipalRoleMap
from guillotina.interfaces import IRequest
from guillotina.interfaces import IResource
import typing
logger = glogging.getLogger("guillotina")
def get_content_path(content: IResource) -> str:
"""
Generate path of resource object from the container
:param content: object to get path from
"""
parts = []
parent = getattr(content, "__parent__", None)
while (
content is not None
and content.__name__ is not None
and parent is not None
and not IContainer.providedBy(content)
):
parts.append(content.__name__)
content = parent
parent = getattr(content, "__parent__", None)
return "/" + "/".join(reversed(parts))
def get_content_depth(content: IResource) -> int:
"""
Calculate the depth of a resource object
"""
depth = 0
for _ in iter_parents(content):
depth += 1
return depth
def iter_parents(content: IResource) -> typing.Iterator[IResource]:
"""
Iterate through all the parents of a content object
    :param content: object whose parents to iterate
"""
content = getattr(content, "__parent__", None)
while content is not None:
yield content
content = getattr(content, "__parent__", None)
def valid_id(_id) -> bool:
_id = _id.lower()
# can't start with _ or be path explorers
if _id in (None, ".", "..") or _id[0] in ("_", "@"):
return False
return _id == "".join([l for l in _id if l in app_settings["valid_id_characters"]]) # noqa: E741
async def get_containers():
root = get_utility(IApplication, name="root")
for _id, db in root:
if not IDatabase.providedBy(db):
continue
tm = db.get_transaction_manager()
async with tm:
task_vars.db.set(db)
async with tm.lock:
txn = await tm.begin()
try:
items = {}
async for c_id, container in db.async_items():
items[c_id] = container
finally:
try:
await tm.abort(txn=txn)
except Exception:
                        logger.warning("Error aborting transaction", exc_info=True)
for _, container in items.items():
with await tm.begin() as txn:
container.__txn__ = txn
task_vars.registry.set(None) # reset on new container
task_vars.container.set(container)
yield txn, tm, container
try:
# do not rely on consumer of object to always close it.
# there is no harm in aborting twice
await tm.abort(txn=txn)
except Exception:
                    logger.warning("Error aborting transaction", exc_info=True)
def get_owners(obj: IResource) -> list:
"""
Return owners of an object
    :param obj: object to get owners for
"""
try:
prinrole = IPrincipalRoleMap(obj)
except TypeError:
return []
owners = []
for user, roles in prinrole._bycol.items():
for role in roles:
if role == "guillotina.Owner":
owners.append(user)
if len(owners) == 0 and getattr(obj, "__parent__", None) is not None:
# owner can be parent if none found on current object
return get_owners(obj.__parent__)
return owners
async def navigate_to(obj: IAsyncContainer, path: str):
"""
Get a sub-object.
    :param obj: container to start traversal from
:param path: relative path to object you want to retrieve
"""
actual = obj
path_components = path.strip("/").split("/")
for p in path_components:
if p != "":
try:
item = await actual.async_get(p)
            except AttributeError:  # 'actual' has no `async_get()` method
item = None
if item is None:
raise KeyError(f"No {p} in {actual}")
else:
actual = item
return actual
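# Illustrative example: ``await navigate_to(container, 'a/b')`` resolves each
# segment via async_get() and raises KeyError('No b in ...') on a miss.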
def get_object_url(ob: IResource, request: IRequest = None, **kwargs) -> typing.Optional[str]:
"""
Generate full url of object.
:param ob: object to get url for
    :param request: request used to generate the url; defaults to the current request
"""
if request is None:
request = get_current_request()
url_adapter = query_multi_adapter((ob, request), IAbsoluteURL)
if url_adapter is not None:
return url_adapter(**kwargs)
return None
async def get_object_by_uid(uid: str, txn=None) -> IBaseObject:
"""
Get an object from an uid
    :param uid: UID of the object you need to retrieve
    :param txn: Database transaction object. Defaults to the current
        transaction if not provided
"""
if txn is None:
from guillotina.transactions import get_transaction
txn = get_transaction()
result = txn._manager._hard_cache.get(uid, None)
if result is None:
result = await txn._get(uid)
if result["parent_id"] == TRASHED_ID:
raise KeyError(uid)
obj = app_settings["object_reader"](result)
obj.__txn__ = txn
if result["parent_id"]:
parent = await get_object_by_uid(result["parent_id"], txn)
if parent is not None:
obj.__parent__ = parent
else:
raise KeyError(result["parent_id"])
return obj
async def get_behavior(ob, iface, create=False):
"""
Generate behavior of object.
:param ob: object to get behavior for
:param interface: interface registered for behavior
:param create: if behavior data empty, should we create it?
"""
behavior = iface(ob, None)
if behavior is None:
return behavior
await behavior.load(create=create)
return behavior
async def iter_databases(root=None):
if root is None:
root = get_utility(IApplication, name="root")
loaded = []
for _, db in root:
if IDatabase.providedBy(db):
yield db
loaded.append(db.id)
last_checked = None
while last_checked is None or set(last_checked) != set(loaded):
# we need to continue checking until we're sure there aren't any
# new storage objects that have been added since we started
last_checked = loaded[:]
# from all dynamic storages
for _, config in list_or_dict_items(app_settings["storages"]):
ctype = config.get("type", config["storage"])
factory = get_adapter(root, IDatabaseManager, name=ctype, args=[config])
for db_name in await factory.get_names():
if db_name in loaded:
continue
db = await factory.get_database(db_name)
loaded.append(db.id)
yield db
async def get_database(db_id, root=None) -> IDatabase:
"""
Get configured database
:param db_id: configured database id
"""
if root is None:
root = get_utility(IApplication, name="root")
if db_id in root:
db = root[db_id]
if IDatabase.providedBy(db):
return db
for _, config in list_or_dict_items(app_settings["storages"]):
ctype = config.get("type", config["storage"])
factory = get_adapter(root, IDatabaseManager, name=ctype, args=[config])
databases = await factory.get_names()
if db_id in databases:
return await factory.get_database(db_id)
raise DatabaseNotFound(db_id)
def get_full_content_path(ob) -> str:
"""
Generate full path of resource object from root
    :param ob: object to get path from
"""
parts = []
while ob is not None and not IApplication.providedBy(ob):
if IDatabase.providedBy(ob):
parts.append(ob.__db_id__)
break
else:
parts.append(ob.__name__)
ob = getattr(ob, "__parent__", None)
return "/" + "/".join(reversed(parts))
|
5726c42792d93f2936798c89e9ff5a73a8118816
|
dcd772f567ef8a8a1173a9f437cd68f211fb9362
|
/tests/framework/OutStreams/testImageColorbar.py
|
f15bd8eb16edf3fb1dab411e83ef0de91c88ca91
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
idaholab/raven
|
39cdce98ad916c638399232cdc01a9be00e200a2
|
2b16e7aa3325fe84cab2477947a951414c635381
|
refs/heads/devel
| 2023-08-31T08:40:16.653099
| 2023-08-29T16:21:51
| 2023-08-29T16:21:51
| 85,989,537
| 201
| 126
|
Apache-2.0
| 2023-09-13T21:55:43
| 2017-03-23T19:29:27
|
C++
|
UTF-8
|
Python
| false
| false
| 2,809
|
py
|
testImageColorbar.py
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on 8/1/16
@author: maljdan
<TestInfo>
<name>framework.colorbar_plot</name>
<author>maljdan</author>
<created>2016-08-01</created>
<classesTested> </classesTested>
<description>
      This tests the online generation of plots (the colorbar plot).
      It cannot be considered part of the active code, but rather part of the
      regression test system. This file is a near-identical copy of
      testImageGeneration.py and at some point we should see if the interface
      can be generalized to handle image file diffs more generically. Right
      now the number of image file tests is small, but to be scalable we need
      a better solution.
</description>
<revisions>
<revision author="maljdan" date="2016-05-04">Fixing the test for the compare executable to test the gold image against itself, if this returns a non-zero code, then the version of imageMagick cannot be used to get a valid difference. Also, I am removing the difference image and instead doing null: to remove the output file when using compare.</revision>
<revision author="alfoa" date="2017-01-21">Adding this test description.</revision>
</revisions>
</TestInfo>
(possibly promote to a diff class at some point, but relies on an external
application, namely ImageMagick) This test was created in reference to issue
#639
"""
import subprocess
import sys
import os
## Some tweakable parameters
differenceMetric = 'ae' ## Careful you may need to parse the output of different
## metrics since they don't all output a single value
fuzzAmount = '5%' ## This dictates how close pixel values need to be to be
## considered the same
## Make this reusable for an arbitrary pair of images and an arbitrary test file
## by passing them all as variables
inputFile = 'test_colorbar.xml'
testImage = os.path.join('plot','colorbarTest.png')
goldImage = os.path.join('gold',testImage)
retCode = subprocess.call(['python','../../../framework/Driver.py',inputFile])
if retCode == 0:
proc = subprocess.Popen(['compare', '-metric', differenceMetric, '-fuzz',fuzzAmount, testImage,goldImage,'null:'],stderr=subprocess.PIPE)
retCode = int(proc.stderr.read())
sys.exit(retCode)
|
15ae5cb57520ca653abe2e89eb5322ab7bc400ba
|
cfb41f392fac304095a80d08497727c621550c00
|
/src/petclaw/solution.py
|
ce7b5cdb4e87a1696321fd8cf5ab5af4d28b93b7
|
[
"BSD-3-Clause"
] |
permissive
|
clawpack/pyclaw
|
5b7121b63609c2cf9af30e012c9318e3b5244f18
|
6323b7295b80f33285b958b1a2144f88f51be4b1
|
refs/heads/master
| 2023-04-16T23:48:31.519427
| 2023-03-21T06:08:21
| 2023-03-21T06:08:21
| 1,628,711
| 124
| 97
|
BSD-3-Clause
| 2023-09-12T12:22:30
| 2011-04-18T03:11:21
|
Fortran
|
UTF-8
|
Python
| false
| false
| 887
|
py
|
solution.py
|
from __future__ import absolute_import
from clawpack import pyclaw
from clawpack.pyclaw.solution import Solution
class Solution(Solution):
""" Parallel Solution class.
"""
__doc__ += pyclaw.util.add_parent_doc(pyclaw.Solution)
def get_read_func(self, file_format):
from clawpack.petclaw import fileio
if file_format == 'petsc':
return fileio.petsc.read
elif file_format == 'hdf5':
return fileio.hdf5.read
else:
raise ValueError("File format %s not supported." % file_format)
def get_write_func(self, file_format):
from clawpack.petclaw import fileio
if 'petsc' in file_format:
return fileio.petsc.write
elif 'hdf5' in file_format:
return fileio.hdf5.write
else:
raise ValueError("File format %s not supported." % file_format)
|
6419457db138bf8c85533455b6894468c7b29fc9
|
7c593f4cc70ee56106cc9cce105e6b9e7839431e
|
/tests/vectorize.py
|
170771e3e818fb2fb58610d6ae8fddc2917ea143
|
[
"Apache-2.0"
] |
permissive
|
google/objax
|
84e397cafb70813a1e89467f745facf828ed24b8
|
a2d025d9e1da8660a1883404207c41d4327d8c48
|
refs/heads/master
| 2023-09-02T07:04:26.801269
| 2023-06-12T22:12:53
| 2023-06-12T22:12:53
| 288,923,752
| 801
| 80
|
Apache-2.0
| 2023-06-12T22:12:54
| 2020-08-20T06:20:40
|
Python
|
UTF-8
|
Python
| false
| false
| 7,307
|
py
|
vectorize.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for Vectorize Layer."""
import unittest
import jax.numpy as jn
import numpy as np
import objax
class TestVectorize(unittest.TestCase):
def test_vectorize_module_one_arg(self):
"""Vectorize module with a single argument."""
f = objax.nn.Linear(3, 4)
fv = objax.Vectorize(f)
x = objax.random.normal((96, 3))
y = f(x)
yv = fv(x)
self.assertTrue(jn.array_equal(y, yv))
def test_vectorize_module_one_arg_transposed_axis(self):
"""Vectorize module with a single argument transposed axis."""
f = objax.nn.Linear(3, 4)
fv = objax.Vectorize(f, batch_axis=(1,))
x = objax.random.normal((96, 3))
y = f(x)
yv = fv(x.T)
self.assertTrue(jn.array_equal(y, yv))
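    # Descriptive note on the test above: batch_axis=(1,) declares that the
    # single input is batched along axis 1, so the vectorized module receives
    # the transposed (3, 96) array yet matches f(x) on the (96, 3) input.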
def test_vectorize_module_two_args(self):
"""Vectorize module with a two arguments."""
f1 = objax.nn.Linear(3, 4)
f2 = objax.nn.Linear(5, 3)
f = lambda x, y: jn.concatenate([f1(x), f2(y)], axis=1)
fv = objax.Vectorize(lambda x, y: jn.concatenate([f1(x), f2(y)], axis=0), f1.vars('f1') + f2.vars('f2'),
batch_axis=(0, 0))
x1 = objax.random.normal((96, 3))
x2 = objax.random.normal((96, 5))
y = f(x1, x2)
yv = fv(x1, x2)
self.assertTrue(jn.array_equal(y, yv))
def test_vectorize_module_two_args_mixed_axis(self):
"""Vectorize module with a two arguments with mixed batch axis."""
f1 = objax.nn.Linear(3, 4)
f2 = objax.nn.Linear(5, 3)
f = lambda x, y: jn.concatenate([f1(x), f2(y)], axis=1)
fv = objax.Vectorize(lambda x, y: jn.concatenate([f1(x), f2(y)], axis=0), f1.vars('f1') + f2.vars('f2'),
batch_axis=(0, 1))
x1 = objax.random.normal((96, 3))
x2 = objax.random.normal((96, 5))
y = f(x1, x2)
yv = fv(x1, x2.T)
self.assertTrue(jn.array_equal(y, yv))
def test_vectorize_module_one_arg_one_static(self):
"""Vectorize module with a broadcast argument and a static one."""
f = objax.nn.Linear(3, 4)
c = objax.random.normal([4])
fv = objax.Vectorize(lambda x, y: f(x) + y, f.vars(), batch_axis=(0, None))
x = objax.random.normal((96, 3))
y = f(x) + c
yv = fv(x, c)
self.assertTrue(jn.array_equal(y, yv))
def test_vectorize_module_one_arg_one_static_positional_syntax(self):
"""Vectorize module with a broadcast argument and a static one using positional syntax."""
f = objax.nn.Linear(3, 4)
c = objax.random.normal([4])
fv = objax.Vectorize(lambda *a: f(a[0]) + a[1], f.vars(), batch_axis=(0, None))
x = objax.random.normal((96, 3))
y = f(x) + c
yv = fv(x, c)
self.assertTrue(jn.array_equal(y, yv))
def test_vectorize_module_one_arg_one_static_missing_batched(self):
"""Vectorize module with a broadcast argument and a static one with incomplete batch argument."""
f = objax.nn.Linear(3, 4)
with self.assertRaises(AssertionError):
_ = objax.Vectorize(lambda x, y: f(x) + y, f.vars(), batch_axis=(0,))
def test_vectorize_module_one_arg_one_static_missing_batched_call(self):
"""Vectorize module with a broadcast argument and a static one with incomplete batch argument.
Catch exception during call for variadic functions."""
f = objax.nn.Linear(3, 4)
c = objax.random.normal([4])
fv = objax.Vectorize(lambda *a: f(a[0]) + a[1], f.vars(), batch_axis=(0,))
x = objax.random.normal((96, 3))
with self.assertRaises(AssertionError):
_ = fv(x, c)
def test_vectorize_random_function(self):
class RandomReverse(objax.Module):
def __init__(self):
self.keygen = objax.random.Generator()
def __call__(self, x):
r = objax.random.randint([], low=0, high=2, generator=self.keygen)
return x + r * (x[::-1] - x), r
random_reverse = RandomReverse()
vector_reverse = objax.Vectorize(random_reverse)
x = jn.arange(20).reshape((10, 2))
y, r = vector_reverse(x)
self.assertEqual(r.tolist(), [1, 1, 1, 1, 1, 0, 0, 1, 0, 0])
self.assertEqual(y.tolist(), [[1, 0], [3, 2], [5, 4], [7, 6], [9, 8],
[10, 11], [12, 13], [15, 14], [16, 17], [18, 19]])
def test_vectorize_random_function_reseed(self):
class RandomReverse(objax.Module):
def __init__(self):
self.keygen = objax.random.Generator(1337)
def __call__(self, x):
r = objax.random.randint([], 0, 2, generator=self.keygen)
return x + r * (x[::-1] - x), r
random_reverse = RandomReverse()
vector_reverse = objax.Vectorize(random_reverse)
x = jn.arange(20).reshape((10, 2))
random_reverse.keygen.seed(0)
y, r = vector_reverse(x)
self.assertEqual(r.tolist(), [1, 1, 1, 1, 1, 0, 0, 1, 0, 0])
self.assertEqual(y.tolist(), [[1, 0], [3, 2], [5, 4], [7, 6], [9, 8],
[10, 11], [12, 13], [15, 14], [16, 17], [18, 19]])
def test_trainvar_assign(self):
m = objax.ModuleList([objax.TrainVar(jn.zeros(2))])
def increase(x):
m[0].assign(m[0].value + 1)
return x + 1
x = np.arange(10)[:, None]
vec_increase = objax.Vectorize(increase, m.vars())
y = vec_increase(x)
self.assertEqual(y.tolist(), np.arange(1, 11)[:, None].tolist())
self.assertEqual(m[0].value.tolist(), [1., 1.])
def test_trainvar_and_ref_assign(self):
m = objax.ModuleList([objax.TrainVar(jn.zeros(2))])
m.append(objax.TrainRef(m[0]))
def increase(x):
m[0].assign(m[0].value + 1)
m[1].assign(m[1].value + 1)
return x + 1
x = np.arange(10)[:, None]
vec_increase = objax.Vectorize(increase, m.vars())
y = vec_increase(x)
self.assertEqual(y.tolist(), np.arange(1, 11)[:, None].tolist())
self.assertEqual(m[0].value.tolist(), [2., 2.])
def test_trainvar_assign_multivalue(self):
m = objax.ModuleList([objax.TrainVar(jn.array((1., 2.)))])
def increase(x):
m[0].assign(m[0].value + x)
return x * 2
x = np.arange(10)[:, None]
vec_increase = objax.Vectorize(increase, m.vars())
y = vec_increase(x)
self.assertEqual(y.tolist(), (2 * np.arange(10))[:, None].tolist())
self.assertEqual(m[0].value.tolist(), [5.5, 6.5])
if __name__ == '__main__':
unittest.main()
|
9798528ceb24f8dea7f6240a1e6a729365e0d687
|
500bca3e22bd0c30c79b74918e9847742b3c428e
|
/v1/python-sdk/tutorials/using-rapids/src/rapids_csp_azure.py
|
a1890435ea7ecaa9366517b86e6fdb317158a5a7
|
[
"MIT"
] |
permissive
|
Azure/azureml-examples
|
2304c862fd2e36e6640ecc4d09f69c5ed93b48ab
|
e5f7b247d4753f115a8f7da30cbe25294f71f9d7
|
refs/heads/main
| 2023-08-31T00:10:14.107509
| 2023-08-30T17:29:22
| 2023-08-30T17:29:22
| 289,334,021
| 1,219
| 1,074
|
MIT
| 2023-09-14T16:00:55
| 2020-08-21T18:04:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 20,170
|
py
|
rapids_csp_azure.py
|
#
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import pprint
import sys
import time
import dask
import numpy as np
import pandas as pd
import psutil
import pyarrow.orc as pyarrow_orc  # needed by the CPU ORC read path below
import sklearn
from dask.distributed import Client, wait
from sklearn import ensemble
from sklearn.model_selection import train_test_split as sklearn_train_test_split
import cudf
import cuml
import cupy
import dask_cudf
import pynvml
import xgboost
from cuml.dask.common import utils as dask_utils
from cuml.dask.ensemble import RandomForestClassifier as cumlDaskRF
from cuml.metrics.accuracy import accuracy_score
from cuml.preprocessing.model_selection import train_test_split as cuml_train_test_split
from dask_cuda import LocalCUDACluster
from dask_ml.model_selection import train_test_split as dask_train_test_split
default_azureml_paths = {
"train_script": "./train_script",
"train_data": "./data_airline",
"output": "./output",
}
class RapidsCloudML(object):
def __init__(
self,
cloud_type="Azure",
model_type="RandomForest",
data_type="Parquet",
compute_type="single-GPU",
verbose_estimator=False,
CSP_paths=default_azureml_paths,
):
self.CSP_paths = CSP_paths
self.cloud_type = cloud_type
self.model_type = model_type
self.data_type = data_type
self.compute_type = compute_type
self.verbose_estimator = verbose_estimator
self.log_to_file(
f"\n> RapidsCloudML\n\tCompute, Data , Model, Cloud types {self.compute_type, self.data_type, self.model_type, self.cloud_type}"
)
# Setting up client for multi-GPU option
if "multi" in self.compute_type:
self.log_to_file("\n\tMulti-GPU selected")
# This will use all GPUs on the local host by default
cluster = LocalCUDACluster(threads_per_worker=1)
self.client = Client(cluster)
# Query the client for all connected workers
self.workers = self.client.has_what().keys()
self.n_workers = len(self.workers)
self.log_to_file(f"\n\tClient information {self.client}")
def load_hyperparams(self, model_name="XGBoost"):
"""
        Selects model parameters based on the model chosen for execution.
Checks if there is a config file present in the path self.CSP_paths['hyperparams'] with
the parameters for the experiment. If not present, it returns the default parameters.
Parameters
----------
model_name : string
Selects which model to set the parameters for. Takes either 'XGBoost' or 'RandomForest'.
Returns
----------
model_params : dict
Loaded model parameters (dict)
"""
self.log_to_file("\n> Loading Hyperparameters")
# Default parameters of the models
if self.model_type == "XGBoost":
# https://xgboost.readthedocs.io/en/latest/parameter.html
model_params = {
"max_depth": 6,
"num_boost_round": 100,
"learning_rate": 0.3,
"gamma": 0.0,
"lambda": 1.0,
"alpha": 0.0,
"objective": "binary:logistic",
"random_state": 0,
}
elif self.model_type == "RandomForest":
# https://docs.rapids.ai/api/cuml/stable/ -> cuml.ensemble.RandomForestClassifier
model_params = {
"n_estimators": 10,
"max_depth": 10,
"n_bins": 16,
"max_features": 1.0,
"seed": 0,
}
hyperparameters = {}
try:
with open(self.CSP_paths["hyperparams"], "r") as file_handle:
hyperparameters = json.load(file_handle)
for key, value in hyperparameters.items():
model_params[key] = value
pprint.pprint(model_params)
return model_params
except Exception as error:
self.log_to_file(str(error))
            return model_params  # fall back to the defaults above (matches the docstring)
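    # Behavior note (descriptive): keys found in the optional JSON config at
    # CSP_paths['hyperparams'] override the per-model defaults above; with no
    # config file present, the defaults are returned unchanged.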
def load_data(
self, filename="dataset.orc", col_labels=None, y_label="ArrDelayBinary"
):
"""
Loading the data into the object from the filename and based on the columns that we are
interested in. Also, generates y_label from 'ArrDelay' column to convert this into a binary
classification problem.
Parameters
----------
filename : string
the path of the dataset to be loaded
col_labels : list of strings
The input columns that we are interested in. None selects all the columns
y_label : string
The column to perform the prediction task in.
Returns
----------
dataset : dataframe (Pandas, cudf or dask-cudf)
Ingested dataset in the format of a dataframe
col_labels : list of strings
The input columns selected
y_label : string
The generated y_label name for binary classification
duration : float
The time it took to execute the function
"""
target_filename = filename
self.log_to_file(f"\n> Loading dataset from {target_filename}")
with PerfTimer() as ingestion_timer:
if "CPU" in self.compute_type:
# CPU Reading options
self.log_to_file(f"\n\tCPU read")
if self.data_type == "ORC":
with open(target_filename, mode="rb") as file:
dataset = pyarrow_orc.ORCFile(file).read().to_pandas()
elif self.data_type == "CSV":
dataset = pd.read_csv(target_filename, names=col_labels)
elif self.data_type == "Parquet":
if "single" in self.compute_type:
dataset = pd.read_parquet(target_filename)
elif "multi" in self.compute_type:
self.log_to_file(f"\n\tReading using dask dataframe")
dataset = dask.dataframe.read_parquet(
                            target_filename, columns=col_labels
)
elif "GPU" in self.compute_type:
# GPU Reading Option
self.log_to_file(f"\n\tGPU read")
if self.data_type == "ORC":
dataset = cudf.read_orc(target_filename)
elif self.data_type == "CSV":
dataset = cudf.read_csv(target_filename, names=col_labels)
elif self.data_type == "Parquet":
if "single" in self.compute_type:
dataset = cudf.read_parquet(target_filename)
elif "multi" in self.compute_type:
self.log_to_file(f"\n\tReading using dask_cudf")
dataset = dask_cudf.read_parquet(
target_filename, columns=col_labels
)
# cast all columns to float32
for col in dataset.columns:
dataset[col] = dataset[col].astype(np.float32) # needed for random forest
# Adding y_label column if it is not present
if y_label not in dataset.columns:
dataset[y_label] = 1.0 * (dataset["ArrDelay"] > 10)
dataset[y_label] = dataset[y_label].astype(np.int32) # Needed for cuml RF
dataset = dataset.fillna(0.0) # Filling the null values. Needed for dask-cudf
self.log_to_file(f"\n\tIngestion completed in {ingestion_timer.duration}")
self.log_to_file(
f"\n\tDataset descriptors: {dataset.shape}\n\t{dataset.dtypes}"
)
return dataset, col_labels, y_label, ingestion_timer.duration
def split_data(
self, dataset, y_label, train_size=0.8, random_state=0, shuffle=True
):
"""
        Splits the data into train and test sets, using the appropriate library for each compute mode:
        CPU compute - uses sklearn; the y_label column is filtered out manually in the split call.
        GPU compute - single GPU uses cuml and multi GPU uses dask; both split off y_label internally.
Parameters
----------
dataset : dataframe
The dataframe on which we wish to perform the split
y_label : string
The name of the column (not the series itself)
train_size : float
The size for the split. Takes values between 0 to 1.
random_state : int
Useful for running reproducible splits.
        shuffle : bool
Specifies if the data must be shuffled before splitting.
Returns
----------
X_train : dataframe
The data to be used for training. Has same type as input dataset.
X_test : dataframe
The data to be used for testing. Has same type as input dataset.
y_train : dataframe
The label to be used for training. Has same type as input dataset.
y_test : dataframe
The label to be used for testing. Has same type as input dataset.
duration : float
The time it took to perform the split
"""
self.log_to_file("\n> Splitting train and test data")
start_time = time.perf_counter()
with PerfTimer() as split_timer:
if "CPU" in self.compute_type:
X_train, X_test, y_train, y_test = sklearn_train_test_split(
dataset.loc[:, dataset.columns != y_label],
dataset[y_label],
train_size=train_size,
shuffle=shuffle,
random_state=random_state,
)
elif "GPU" in self.compute_type:
if "single" in self.compute_type:
X_train, X_test, y_train, y_test = cuml_train_test_split(
X=dataset,
y=y_label,
train_size=train_size,
shuffle=shuffle,
random_state=random_state,
)
elif "multi" in self.compute_type:
X_train, X_test, y_train, y_test = dask_train_test_split(
dataset,
y_label,
train_size=train_size,
shuffle=False, # shuffle not available for dask_cudf yet
random_state=random_state,
)
        self.log_to_file(f"\n\tX_train shape and type {X_train.shape} {type(X_train)}")
self.log_to_file(f"\n\tSplit completed in {split_timer.duration}")
return X_train, X_test, y_train, y_test, split_timer.duration
def train_model(self, X_train, y_train, model_params):
"""
Trains a model with the model_params specified by calling fit_xgboost or
fit_random_forest depending on the model_type.
Parameters
----------
X_train : dataframe
            The data used for training
y_train : dataframe
The label to be used for training.
model_params : dict
The model params to use for this training
Returns
----------
trained_model : The object of the trained model either of XGBoost or RandomForest
training_time : float
The time it took to train the model
"""
self.log_to_file(f"\n> Training {self.model_type} estimator w/ hyper-params")
training_time = 0
try:
if self.model_type == "XGBoost":
trained_model, training_time = self.fit_xgboost(
X_train, y_train, model_params
)
elif self.model_type == "RandomForest":
trained_model, training_time = self.fit_random_forest(
X_train, y_train, model_params
)
except Exception as error:
self.log_to_file("\n\n!error during model training: " + str(error))
self.log_to_file(f"\n\tFinished training in {training_time:.4f} s")
return trained_model, training_time
def fit_xgboost(self, X_train, y_train, model_params):
"""
        Trains an XGBoost model on X_train and y_train with model_params.
        Parameters and objects returned are the same as for train_model.
"""
if "GPU" in self.compute_type:
model_params.update({"tree_method": "gpu_hist"})
else:
model_params.update({"tree_method": "hist"})
with PerfTimer() as train_timer:
if "single" in self.compute_type:
train_DMatrix = xgboost.DMatrix(data=X_train, label=y_train)
trained_model = xgboost.train(
dtrain=train_DMatrix,
params=model_params,
num_boost_round=model_params["num_boost_round"],
)
elif "multi" in self.compute_type:
self.log_to_file("\n\tTraining multi-GPU XGBoost")
train_DMatrix = xgboost.dask.DaskDMatrix(
self.client, data=X_train, label=y_train
)
trained_model = xgboost.dask.train(
self.client,
dtrain=train_DMatrix,
params=model_params,
num_boost_round=model_params["num_boost_round"],
)
return trained_model, train_timer.duration
def fit_random_forest(self, X_train, y_train, model_params):
"""
Trains a RandomForest model on X_train and y_train with model_params.
Depending on compute_type, estimators from appropriate packages are used.
CPU - sklearn
Single-GPU - cuml
        multi-GPU - cuml.dask
        Parameters and objects returned are the same as for train_model.
"""
if "CPU" in self.compute_type:
rf_model = sklearn.ensemble.RandomForestClassifier(
n_estimators=model_params["n_estimators"],
max_depth=model_params["max_depth"],
max_features=model_params["max_features"],
n_jobs=int(self.n_workers),
verbose=self.verbose_estimator,
)
elif "GPU" in self.compute_type:
if "single" in self.compute_type:
rf_model = cuml.ensemble.RandomForestClassifier(
n_estimators=model_params["n_estimators"],
max_depth=model_params["max_depth"],
n_bins=model_params["n_bins"],
max_features=model_params["max_features"],
verbose=self.verbose_estimator,
)
elif "multi" in self.compute_type:
self.log_to_file("\n\tFitting multi-GPU daskRF")
X_train, y_train = dask_utils.persist_across_workers(
self.client,
[X_train.fillna(0.0), y_train.fillna(0.0)],
workers=self.workers,
)
rf_model = cuml.dask.ensemble.RandomForestClassifier(
n_estimators=model_params["n_estimators"],
max_depth=model_params["max_depth"],
n_bins=model_params["n_bins"],
max_features=model_params["max_features"],
verbose=self.verbose_estimator,
)
with PerfTimer() as train_timer:
try:
trained_model = rf_model.fit(X_train, y_train)
except Exception as error:
self.log_to_file("\n\n! Error during fit " + str(error))
return trained_model, train_timer.duration
def evaluate_test_perf(self, trained_model, X_test, y_test, threshold=0.5):
"""
        Evaluates model performance on the test set. For XGBoost we build a
        DMatrix and evaluate the model against it. For Random Forest, the
        single-GPU case simply calls the .score function, while the multi-GPU
        case predicts with the model and then computes the accuracy score.
Parameters
----------
trained_model : The object of the trained model either of XGBoost or RandomForest
X_test : dataframe
The data for testing
y_test : dataframe
The label to be used for testing.
Returns
----------
test_accuracy : float
The accuracy achieved on test set
duration : float
The time it took to evaluate the model
"""
self.log_to_file(f"\n> Inferencing on test set")
test_accuracy = None
with PerfTimer() as inference_timer:
try:
if self.model_type == "XGBoost":
if "multi" in self.compute_type:
test_DMatrix = xgboost.dask.DaskDMatrix(
self.client, data=X_test, label=y_test
)
xgb_pred = xgboost.dask.predict(
self.client, trained_model, test_DMatrix
).compute()
xgb_pred = (xgb_pred > threshold) * 1.0
test_accuracy = accuracy_score(y_test.compute(), xgb_pred)
elif "single" in self.compute_type:
test_DMatrix = xgboost.DMatrix(data=X_test, label=y_test)
xgb_pred = trained_model.predict(test_DMatrix)
xgb_pred = (xgb_pred > threshold) * 1.0
test_accuracy = accuracy_score(y_test, xgb_pred)
elif self.model_type == "RandomForest":
if "multi" in self.compute_type:
cuml_pred = trained_model.predict(X_test).compute()
self.log_to_file("\n\tPrediction complete")
test_accuracy = accuracy_score(
y_test.compute(), cuml_pred, convert_dtype=True
)
elif "single" in self.compute_type:
test_accuracy = trained_model.score(
X_test, y_test.astype("int32")
)
except Exception as error:
self.log_to_file("\n\n!error during inference: " + str(error))
self.log_to_file(f"\n\tFinished inference in {inference_timer.duration:.4f} s")
self.log_to_file(f"\n\tTest-accuracy: {test_accuracy}")
return test_accuracy, inference_timer.duration
def set_up_logging(self):
"""
Function to set up logging for the object.
"""
logging_path = self.CSP_paths["output"] + "/log.txt"
logging.basicConfig(filename=logging_path, level=logging.INFO)
def log_to_file(self, text):
"""
Logs the text that comes in as input.
"""
logging.info(text)
print(text)
# perf_counter = highest available timer resolution
class PerfTimer:
def __init__(self):
self.start = None
self.duration = None
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.duration = time.perf_counter() - self.start
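# --- Minimal usage sketch (illustrative only) ---
# Shows how the pieces above are meant to be chained together: construct the
# helper, load hyperparameters, ingest data, split, train, and evaluate.
# The parquet path below is an assumption; point it at a real airline dataset.
if __name__ == "__main__":
    rcml = RapidsCloudML(
        model_type="RandomForest",
        data_type="Parquet",
        compute_type="single-GPU",
    )
    model_params = rcml.load_hyperparams(model_name="RandomForest")
    dataset, col_labels, y_label, _ = rcml.load_data(
        filename="./data_airline/airline.parquet"  # hypothetical input file
    )
    X_train, X_test, y_train, y_test, _ = rcml.split_data(dataset, y_label)
    trained_model, _ = rcml.train_model(X_train, y_train, model_params)
    test_accuracy, _ = rcml.evaluate_test_perf(trained_model, X_test, y_test)
    print(f"test accuracy: {test_accuracy}")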
| 160a7c2b86a96aede5a51b5866f10bd5d06080a4 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /DQMOffline/L1Trigger/python/L1TRateOfflineParams_cff.py | 660056595f2d05c1aee757ca94f9eae0c390b5f3 | ["Apache-2.0"] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 62,793 | py | L1TRateOfflineParams_cff.py |
import FWCore.ParameterSet.Config as cms
RateParams_2012 = cms.VPSet(
cms.PSet(
AlgoName = cms.string ('L1_SingleEG22'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-250.646,1.78975,-5.5801e-05,5.90724e-09),
ParErrors = cms.vdouble(4.6133,0.00124988,2.17871e-07,2.56616e-11),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM50'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-781.253,0.589212,-8.09682e-05,6.11958e-09),
ParErrors = cms.vdouble(1.57155,0.000427218,7.48549e-08,8.83838e-12),
),
cms.PSet(
AlgoName = cms.string ('L1_ETT80'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(6.41489e+06,-3748.94,0.656481,-2.14204e-05),
ParErrors = cms.vdouble(34685.1,9.68504,0.00177067,2.12745e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT100'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-461.806,0.635347,-1.80574e-05,1.61805e-08),
ParErrors = cms.vdouble(2026.82,0.549814,9.61286e-05,1.1393e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG20er'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(152.06,1.02088,5.33485e-06,9.91573e-10),
ParErrors = cms.vdouble(3.81866,0.00103454,1.80314e-07,2.12361e-11),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet128'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-217.021,0.326,-1.41957e-05,1.4588e-09),
ParErrors = cms.vdouble(1.82347,0.000494797,8.64361e-08,1.01899e-11),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu16'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-5435.68,6.38796,-0.000563198,3.4096e-08),
ParErrors = cms.vdouble(7.08545,0.00194953,3.5046e-07,4.37085e-11),
),
)
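# --- Illustrative helper (not used anywhere in CMSSW; added as a sketch) ---
# Each PSet above pairs an L1 algorithm with a ROOT TF1-style template string,
# '[0]/x+[1]+[2]*x+[3]*x*x', whose four coefficients are listed in Parameters
# (with their uncertainties in ParErrors). The plain-Python sketch below shows
# how such a template evaluates for a coefficient list p; the function name is
# purely illustrative.
def _template_rate(x, p):
    """Evaluate rate(x) = p[0]/x + p[1] + p[2]*x + p[3]*x*x."""
    return p[0] / x + p[1] + p[2] * x + p[3] * x * x
# Example (hypothetical x value): _template_rate(5000.0,
# [-250.646, 1.78975, -5.5801e-05, 5.90724e-09]) evaluates the L1_SingleEG22
# fit from RateParams_2012 above at x = 5000.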
RateParams_2011 = cms.VPSet(
cms.PSet(
AlgoName = cms.string ('L1_BeamHalo'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.217e+05,4674,-24.57,0.0403),
ParErrors = cms.vdouble(917,8.9,0.0536,0.000189),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet36'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-3.063e+04,573.5,-2.557,0.003734),
ParErrors = cms.vdouble(196,1.9,0.0115,4.04e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu3_Jet28_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(28.02,0.8295,0.005182,-3.804e-06),
ParErrors = cms.vdouble(0.767,0.0074,4.37e-05,1.55e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG12_Eta1p39'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-48.09,8.644,-0.007349,1.327e-05),
ParErrors = cms.vdouble(1.67,0.0161,9.51e-05,3.37e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet80_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2.729,1.798,-0.0004621,2.024e-06),
ParErrors = cms.vdouble(0.809,0.00781,4.61e-05,1.64e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMu0_HighQ'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(5.465,0.842,-0.0003247,7.12e-07),
ParErrors = cms.vdouble(0.547,0.00528,3.12e-05,1.11e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_ETT220'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.1326,0.0824,-3.825e-05,3.86e-07),
ParErrors = cms.vdouble(0.166,0.0016,9.45e-06,3.35e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM100'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(0.5002,0.0287,-2.59e-05,1.145e-07),
ParErrors = cms.vdouble(0.115,0.00111,6.54e-06,2.32e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_EG5_HTT100'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(0.2832,0.4398,-0.0001367,8.933e-07),
ParErrors = cms.vdouble(0.352,0.0034,2.01e-05,7.12e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG3'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.804e+05,3289,-16.4,0.02692),
ParErrors = cms.vdouble(724,7.03,0.0428,0.00015),
),
cms.PSet(
AlgoName = cms.string ('L1_Bsc2Plus_BptxPlus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-332.6,6.053,-0.02989,5.071e-05),
ParErrors = cms.vdouble(0.827,0.00798,4.72e-05,1.67e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.008e+04,371,-1.88,0.003111),
ParErrors = cms.vdouble(77.5,0.752,0.00457,1.6e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu0_HTT75'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.5495,0.08488,-1.23e-05,1.535e-07),
ParErrors = cms.vdouble(0.154,0.00149,8.81e-06,3.12e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG8'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.472,2.01,-0.0003312,1.398e-06),
ParErrors = cms.vdouble(0.741,0.00715,4.23e-05,1.5e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_MuOpen_EG12'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-6.04,0.5044,-4.981e-05,1.392e-06),
ParErrors = cms.vdouble(0.391,0.00377,2.23e-05,7.9e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu7_EG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(19.57,0.3116,0.003338,-4.707e-06),
ParErrors = cms.vdouble(0.462,0.00446,2.64e-05,9.34e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleTauJet28'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(10.61,4.514,0.002078,3.074e-06),
ParErrors = cms.vdouble(1.44,0.0139,8.19e-05,2.9e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu3_DoubleEG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-4.805,0.3497,-2.462e-05,9.928e-07),
ParErrors = cms.vdouble(0.304,0.00294,1.74e-05,6.15e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_EG10_Jet24_Central_deltaPhi1'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-37.21,5.761,-0.003462,1.259e-05),
ParErrors = cms.vdouble(1.39,0.0134,7.94e-05,2.81e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_TripleEG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-14.55,1.521,-0.0001348,2.72e-06),
ParErrors = cms.vdouble(0.613,0.00592,3.49e-05,1.24e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMu3_EG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(13.59,-0.1493,0.001781,-2.755e-06),
ParErrors = cms.vdouble(0.202,0.00195,1.15e-05,4.08e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_BeamGas_Hf'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-3.154e+04,521.7,-2.06,0.002688),
ParErrors = cms.vdouble(202,1.96,0.0118,4.15e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_QuadJet20_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-5.859,0.5604,-0.0004361,5.489e-06),
ParErrors = cms.vdouble(0.474,0.00458,2.7e-05,9.59e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet36_FwdVeto'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(8.003,1.331,-0.004132,8.211e-06),
ParErrors = cms.vdouble(0.818,0.0079,4.68e-05,1.66e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG5_HTT75'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.418,0.5123,-8.796e-05,9.052e-07),
ParErrors = cms.vdouble(0.343,0.00331,1.95e-05,6.93e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu10_Jet36_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.706,0.2342,-2.527e-05,4.362e-07),
ParErrors = cms.vdouble(0.277,0.00267,1.58e-05,5.6e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_BeamGas_Bsc'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-889.9,17.29,-0.1058,0.0002141),
ParErrors = cms.vdouble(1.33,0.0129,8.66e-05,3.33e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu16'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(226.6,-0.3892,0.021,-4.213e-05),
ParErrors = cms.vdouble(1.23,0.0119,7e-05,2.48e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu10'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(307.6,1.192,0.02778,-5.663e-05),
ParErrors = cms.vdouble(1.67,0.0161,9.53e-05,3.38e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu12'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(278.3,0.302,0.02538,-5.128e-05),
ParErrors = cms.vdouble(1.45,0.014,8.25e-05,2.93e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_TripleEG_8_8_5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.64,0.5241,-5.105e-05,4.443e-07),
ParErrors = cms.vdouble(0.393,0.00379,2.24e-05,7.94e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleForJet32_EtaOpp'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.042,0.005913,-0.0001012,8.583e-07),
ParErrors = cms.vdouble(0.0765,0.000739,4.36e-06,1.55e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM50'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.501,0.1614,-8.716e-05,3.255e-07),
ParErrors = cms.vdouble(0.247,0.00238,1.41e-05,4.99e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-3.822e+05,7192,-36.99,0.06157),
ParErrors = cms.vdouble(1.42e+03,13.8,0.0839,0.000294),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleTauJet52'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.432,5.923,-0.00125,4.003e-06),
ParErrors = cms.vdouble(1.53,0.0148,8.72e-05,3.09e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMuBeamHalo'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-5223,105.5,-0.5779,0.001169),
ParErrors = cms.vdouble(15.5,0.149,0.000882,3.13e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu7'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(391.7,4.519,0.03322,-6.835e-05),
ParErrors = cms.vdouble(2.21,0.0213,0.000126,4.46e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet92_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(3.282,0.9599,-0.000304,1.313e-06),
ParErrors = cms.vdouble(0.573,0.00553,3.27e-05,1.16e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu3'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.16e+04,-206.7,2.232,-0.005963),
ParErrors = cms.vdouble(142,1.39,0.0087,3.19e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet16'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.864e+05,4590,-13.57,0.01042),
ParErrors = cms.vdouble(2.72e+03,26.5,0.164,0.000573),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleTauJet32'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(16.08,2.862,0.001933,-1.043e-06),
ParErrors = cms.vdouble(1.05,0.0101,5.96e-05,2.11e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleTauJet36'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-4.067,2.15,-0.0003057,1.282e-06),
ParErrors = cms.vdouble(0.884,0.00853,5.04e-05,1.79e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_Bsc2Minus_BptxMinus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-563.2,11,-0.07045,0.0001489),
ParErrors = cms.vdouble(1.03,0.0101,6.41e-05,2.55e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG12'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-41.86,14.6,-0.01057,1.945e-05),
ParErrors = cms.vdouble(3.12,0.0301,0.000178,6.31e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG12_Eta2p17'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-68.49,10.51,-0.01074,1.996e-05),
ParErrors = cms.vdouble(2.68,0.0258,0.000153,5.41e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG12_Eta2p17'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-89.66,12.44,-0.01358,2.542e-05),
ParErrors = cms.vdouble(2.76,0.0266,0.000157,5.57e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG15'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(16.19,6.582,-0.001815,4.02e-06),
ParErrors = cms.vdouble(2.47,0.0238,0.000141,4.98e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT50_HTM30'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.772,1.116,-0.0003465,2.65e-06),
ParErrors = cms.vdouble(0.638,0.00616,3.64e-05,1.29e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_TripleJet28_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(15.37,1.152,0.002656,1.155e-06),
ParErrors = cms.vdouble(0.866,0.00836,4.94e-05,1.75e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTM50'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-21.27,21.11,0.007577,4.491e-05),
ParErrors = cms.vdouble(6.08,0.0587,0.000347,1.23e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT50_HTM50'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-6.349,2.243,-0.0006191,5.275e-06),
ParErrors = cms.vdouble(0.971,0.00937,5.54e-05,1.96e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMuOpen'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.745e+05,3285,-16.89,0.02808),
ParErrors = cms.vdouble(650,6.31,0.0383,0.000134),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleForJet44_EtaOpp'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.08928,0.002069,3.464e-07,1.239e-08),
ParErrors = cms.vdouble(0.0222,0.000215,1.27e-06,4.5e-09),
),
cms.PSet(
AlgoName = cms.string ('L1_EG5_Jet36_deltaPhi1'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(7.584,10.55,0.004835,1.546e-05),
ParErrors = cms.vdouble(2.49,0.024,0.000142,5.03e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet128'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.714,0.2416,-0.0001174,4.736e-07),
ParErrors = cms.vdouble(0.283,0.00273,1.62e-05,5.72e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet32_NotBptxOR_NotMuBeamHalo'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-37.43,2.527,-0.005776,1.052e-05),
ParErrors = cms.vdouble(1.01,0.00972,5.75e-05,2.04e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleJet44_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(42.7,3.178,0.004426,-3.879e-06),
ParErrors = cms.vdouble(1.47,0.0142,8.4e-05,2.98e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu7_Jet20_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(27.68,0.2173,0.003906,-4.363e-06),
ParErrors = cms.vdouble(0.538,0.0052,3.07e-05,1.09e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_InterBunch_Hf'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.034e+07,2.31e+05,-1180,1.893),
ParErrors = cms.vdouble(6.53e+04,633,3.81,0.0134),
),
cms.PSet(
AlgoName = cms.string ('L1_ETT260_EG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.5519,0.04146,6.584e-07,6.336e-08),
ParErrors = cms.vdouble(0.11,0.00106,6.26e-06,2.22e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG20'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(5.959,2.663,-0.001003,2.586e-06),
ParErrors = cms.vdouble(1.93,0.0186,0.00011,3.9e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG5_HTT50'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-6.566,1.346,-0.000287,2.907e-06),
ParErrors = cms.vdouble(0.687,0.00663,3.92e-05,1.39e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu3_Jet20_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(29.36,1.41,0.007502,-1.804e-06),
ParErrors = cms.vdouble(1.11,0.0107,6.35e-05,2.25e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet92'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.996,1.031,-0.0002973,1.242e-06),
ParErrors = cms.vdouble(0.611,0.0059,3.49e-05,1.24e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT50'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-10.89,3.884,-0.001037,8.683e-06),
ParErrors = cms.vdouble(1.52,0.0146,8.64e-05,3.06e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_ETT300_EG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.6599,0.03346,3.263e-05,-3.985e-07),
ParErrors = cms.vdouble(0.272,0.00263,1.55e-05,5.5e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleTauJet40'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.449,1.404,-0.0002159,5.049e-07),
ParErrors = cms.vdouble(0.675,0.00652,3.85e-05,1.36e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_EG12_Jet24_Central_deltaPhi1'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-27.42,4.091,-0.002958,9.164e-06),
ParErrors = cms.vdouble(1.13,0.0109,6.45e-05,2.29e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_TripleEG_8_5_5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-8.578,1.128,-0.0001071,1.566e-06),
ParErrors = cms.vdouble(0.577,0.00557,3.29e-05,1.17e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG_12_5_Eta1p39'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1654,30.88,-0.157,0.0002593),
ParErrors = cms.vdouble(6.71,0.0651,0.000397,1.39e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleJet36_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(33.82,6.255,0.004926,1.289e-06),
ParErrors = cms.vdouble(2.21,0.0213,0.000126,4.46e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG30'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.746,0.7654,-0.0002219,6.079e-07),
ParErrors = cms.vdouble(0.551,0.00532,3.15e-05,1.12e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu5_Eta1p5_Q80'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-394.1,11.07,-0.03401,5.359e-05),
ParErrors = cms.vdouble(1.74,0.0167,9.89e-05,3.51e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMu3p5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.123,1.349,-0.000353,2.354e-06),
ParErrors = cms.vdouble(0.74,0.00714,4.22e-05,1.5e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMu0_HighQ_EtaCuts'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(7.947,0.751,-0.0003687,5.715e-07),
ParErrors = cms.vdouble(0.508,0.0049,2.89e-05,1.03e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_ZeroBias'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2.176e+06,107,-0.6558,0.001318),
ParErrors = cms.vdouble(77.9,0.751,0.00444,1.57e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu12_EG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(11.32,0.0644,0.00173,-2.519e-06),
ParErrors = cms.vdouble(0.328,0.00316,1.87e-05,6.62e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_InterBunch_Bsc'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.064e+06,6.21e+04,-305.5,0.4944),
ParErrors = cms.vdouble(1.48e+03,14.3,0.0845,0.0003),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet68'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-23.88,4.347,-0.003147,8.809e-06),
ParErrors = cms.vdouble(1.27,0.0123,7.25e-05,2.57e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_BptxMinus_NotBptxPlus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(6.52e+04,42.93,-0.2665,0.0005436),
ParErrors = cms.vdouble(7.81,0.0754,0.000445,1.58e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_BptxPlus_NotBptxMinus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(6.627e+04,22.82,-0.1415,0.0002882),
ParErrors = cms.vdouble(4.52,0.0437,0.000258,9.14e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu3_EG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(60.99,0.967,0.0123,-1.647e-05),
ParErrors = cms.vdouble(0.922,0.0089,5.26e-05,1.86e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT150'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(0.9649,0.1672,-7.215e-05,3.948e-07),
ParErrors = cms.vdouble(0.251,0.00242,1.43e-05,5.07e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleIsoEG10'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.2815,0.4109,-6.543e-05,1.158e-07),
ParErrors = cms.vdouble(0.341,0.00329,1.94e-05,6.89e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_QuadJet28_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.753,0.2655,-9.579e-05,1.405e-06),
ParErrors = cms.vdouble(0.269,0.0026,1.54e-05,5.45e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT75'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.871,1.313,-0.0003372,2.406e-06),
ParErrors = cms.vdouble(0.69,0.00666,3.93e-05,1.39e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_BscMinBiasOR_BptxPlusANDMinus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(7.5e+05,1.971e+04,-92.59,0.1468),
ParErrors = cms.vdouble(500,4.83,0.0285,0.000101),
),
cms.PSet(
AlgoName = cms.string ('L1_PreCollisions'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-6.914e+04,1158,-4.805,0.006113),
ParErrors = cms.vdouble(352,3.41,0.0206,7.24e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleTauJet68'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.248,1.65,-0.0004169,9.507e-07),
ParErrors = cms.vdouble(0.712,0.00687,4.06e-05,1.44e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleJet52'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-4.872,2.418,-0.0004958,3.303e-06),
ParErrors = cms.vdouble(1.07,0.0104,6.12e-05,2.17e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleTauJet80'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2.613,0.7176,-0.000204,4.198e-07),
ParErrors = cms.vdouble(0.456,0.0044,2.6e-05,9.22e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu5_EG12'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.9276,0.1799,-2.543e-05,2.733e-07),
ParErrors = cms.vdouble(0.211,0.00203,1.2e-05,4.26e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_MuOpen_EG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(115.9,1.924,0.0295,-3.29e-05),
ParErrors = cms.vdouble(1.79,0.0173,0.000102,3.62e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet20_NotBptxOR'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-24.41,11.01,-0.00962,1.614e-05),
ParErrors = cms.vdouble(2.97,0.0286,0.000169,6e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG12'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-23.41,12.41,-0.007881,1.412e-05),
ParErrors = cms.vdouble(2.99,0.0289,0.000171,6.05e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_EG5_HTT125'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(0.9394,0.2254,-9.459e-05,5.622e-07),
ParErrors = cms.vdouble(0.28,0.0027,1.6e-05,5.66e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu0_HTT50'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.826,0.2027,-2.206e-05,4.301e-07),
ParErrors = cms.vdouble(0.238,0.0023,1.36e-05,4.82e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM70'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(0.8685,0.05848,-4.67e-05,1.856e-07),
ParErrors = cms.vdouble(0.151,0.00146,8.63e-06,3.06e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM20'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-69.51,8.269,-0.001148,6.683e-05),
ParErrors = cms.vdouble(2.74,0.0265,0.000156,5.54e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG_12_5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-4.085,3.06,-0.0005698,2.818e-06),
ParErrors = cms.vdouble(0.948,0.00915,5.4e-05,1.92e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_EG8_Jet20_Central_deltaPhi1'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-23.1,9.62,0.0005052,1.776e-05),
ParErrors = cms.vdouble(2.08,0.0201,0.000119,4.21e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu7_TauJet16'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(17.17,0.371,0.002836,-2.271e-06),
ParErrors = cms.vdouble(0.497,0.0048,2.83e-05,1e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_TripleEG7'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.7609,0.2553,-4.256e-05,2.581e-07),
ParErrors = cms.vdouble(0.267,0.00258,1.52e-05,5.4e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_EG5_HTT75'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.497,0.9994,-0.0002386,1.871e-06),
ParErrors = cms.vdouble(0.536,0.00517,3.06e-05,1.08e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet20_NotBptxOR_NotMuBeamHalo'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-22.21,10.95,-0.009189,1.524e-05),
ParErrors = cms.vdouble(2.96,0.0286,0.000169,5.99e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu25'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(173.8,-0.7411,0.01661,-3.326e-05),
ParErrors = cms.vdouble(1.01,0.00976,5.77e-05,2.04e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG10'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.05877,0.748,-0.0001303,1.851e-07),
ParErrors = cms.vdouble(0.456,0.0044,2.6e-05,9.22e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu20'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(186.1,-0.4123,0.0174,-3.495e-05),
ParErrors = cms.vdouble(1.08,0.0104,6.17e-05,2.19e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu7_Barrel'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(94.78,1.884,0.002084,-4.637e-06),
ParErrors = cms.vdouble(0.995,0.0096,5.67e-05,2.01e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT100'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(0.9086,0.57,-0.0001851,1.12e-06),
ParErrors = cms.vdouble(0.421,0.00407,2.4e-05,8.52e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG2_FwdVeto'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2721,68.2,-0.3894,0.0006788),
ParErrors = cms.vdouble(31.9,0.31,0.00192,6.74e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG12_Eta1p39'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-36.67,7.33,-0.005623,9.895e-06),
ParErrors = cms.vdouble(1.52,0.0146,8.65e-05,3.07e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu3_Jet16_Central'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(54.42,1.388,0.01152,-4.061e-06),
ParErrors = cms.vdouble(1.26,0.0122,7.19e-05,2.55e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMu3'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(44.72,0.8847,0.005398,-8.196e-06),
ParErrors = cms.vdouble(0.9,0.00869,5.13e-05,1.82e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMu0'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(113.6,0.3927,0.01494,-2.432e-05),
ParErrors = cms.vdouble(1.22,0.0118,6.94e-05,2.46e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMu5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.153,0.6484,-0.0001937,1.474e-06),
ParErrors = cms.vdouble(0.459,0.00443,2.61e-05,9.27e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_EG12_TauJet20_deltaPhi1'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-35.16,3.416,-0.003848,9.685e-06),
ParErrors = cms.vdouble(0.849,0.0082,4.84e-05,1.72e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM30'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-30.37,1.8,-0.003443,9.613e-06),
ParErrors = cms.vdouble(0.644,0.00622,3.67e-05,1.3e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu7_Eta2p1'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(115.2,4.653,0.003062,-6.747e-06),
ParErrors = cms.vdouble(1.43,0.0138,8.18e-05,2.9e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet52'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-12.22,11.63,-0.001005,1.478e-05),
ParErrors = cms.vdouble(2.85,0.0275,0.000163,5.77e-07),
),
)
RateParams_2010 = cms.VPSet(
cms.PSet(
AlgoName = cms.string ('L1_SingleTauJet50U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.676,3.04,-0.0007998,1.166e-06),
ParErrors = cms.vdouble(0.766,0.00896,6.44e-05,2.73e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleHfRingEtSumsRing1_P4N4'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.85e+04,438.6,-3.281,0.02365),
ParErrors = cms.vdouble(191,2.24,0.0161,6.83e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet50U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(21.22,3.712,0.003283,-7.352e-06),
ParErrors = cms.vdouble(0.933,0.0109,7.85e-05,3.33e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT200'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(0.1475,0.2208,-0.0001087,9.218e-07),
ParErrors = cms.vdouble(0.199,0.00233,1.67e-05,7.09e-08),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet30U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(163.2,11.43,0.03061,-5.895e-05),
ParErrors = cms.vdouble(2.67,0.0312,0.000224,9.51e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG2'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2.575e+04,275.1,6.385,-0.01201),
ParErrors = cms.vdouble(196,2.3,0.0165,7e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleEG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(709.7,12.11,0.1411,-0.0002923),
ParErrors = cms.vdouble(5.7,0.0666,0.000479,2.03e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_BscSplashBeam2'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.336e+06,1.844e+04,-56.33,0.01512),
ParErrors = cms.vdouble(1.6e+03,18.8,0.135,0.000572),
),
cms.PSet(
AlgoName = cms.string ('L1_QuadJet6U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-8385,244.2,-1.195,0.01259),
ParErrors = cms.vdouble(101,1.18,0.00847,3.59e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_ETT100'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1088,15.24,1.157,0.01581),
ParErrors = cms.vdouble(194,2.27,0.0164,6.93e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_ZdcMinusOverThreshold'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.018e+07,-3667,30.18,-0.07757),
ParErrors = cms.vdouble(2.04e+04,239,1.72,0.00728),
),
cms.PSet(
AlgoName = cms.string ('L1_BptxXOR_BscMinBiasOR'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(5.446e+04,1396,-13.94,0.03313),
ParErrors = cms.vdouble(644,7.53,0.0542,0.00023),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu5_Jet6U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(187.5,3.155,0.06029,-8.153e-05),
ParErrors = cms.vdouble(3.52,0.0412,0.000296,1.26e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_BscHaloBeam1Outer'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.022e+07,-4541,36.58,-0.09288),
ParErrors = cms.vdouble(2.04e+04,239,1.72,0.00728),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMuTopBottom'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(31.67,0.2592,0.0059,-1.111e-05),
ParErrors = cms.vdouble(0.518,0.00606,4.36e-05,1.85e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleHfRingEtSumsRing2_4'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.581e+05,-2388,59.9,-0.1289),
ParErrors = cms.vdouble(825,10.1,0.0815,0.000444),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleTauJet14U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(154.2,2.543,0.02894,-5.558e-05),
ParErrors = cms.vdouble(1.47,0.0172,0.000123,5.23e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleJet30U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(12.44,1.676,0.003401,-6.471e-06),
ParErrors = cms.vdouble(0.58,0.00679,4.88e-05,2.07e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleCenJet2U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1597,48.25,0.4401,-0.0002804),
ParErrors = cms.vdouble(21.1,0.247,0.00178,7.54e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleTauJet10U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(5477,171.4,1.25,-0.001811),
ParErrors = cms.vdouble(56.8,0.664,0.00478,2.03e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleForJet4U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(3.808e+05,-6611,103.2,-0.2673),
ParErrors = cms.vdouble(1e+03,12.4,0.104,0.000616),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu14'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(91.67,2.427,0.008323,-2.046e-05),
ParErrors = cms.vdouble(1.41,0.0166,0.000119,5.05e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu10'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(123.3,4.307,0.01015,-2.487e-05),
ParErrors = cms.vdouble(2,0.0234,0.000168,7.12e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG2'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(8.167e+04,2866,11.43,-0.03305),
ParErrors = cms.vdouble(622,7.28,0.0523,0.000222),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(8468,243.9,1.41,-0.00348),
ParErrors = cms.vdouble(64.6,0.755,0.00543,2.3e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_HTM30'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-195.1,15.97,-0.05375,0.0002671),
ParErrors = cms.vdouble(5.4,0.0632,0.000454,1.93e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_IsoEG10_Jet6U_ForJet6U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1213,48.23,0.193,-0.0005953),
ParErrors = cms.vdouble(10.2,0.119,0.000857,3.63e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMuBeamHalo'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-7972,208.2,-1.454,0.003662),
ParErrors = cms.vdouble(40.1,0.472,0.00341,1.44e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_BscHaloBeam2Inner'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.022e+07,-4541,36.58,-0.09288),
ParErrors = cms.vdouble(2.04e+04,239,1.72,0.00728),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(338.4,19.59,0.02863,-7.366e-05),
ParErrors = cms.vdouble(5.65,0.0661,0.000476,2.02e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu3'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1267,37.66,0.1455,-0.0003693),
ParErrors = cms.vdouble(13.1,0.153,0.0011,4.68e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu0'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2215,39,0.3018,-0.0007628),
ParErrors = cms.vdouble(19.8,0.232,0.00167,7.06e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG8'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2213,79.99,0.3602,-0.0008934),
ParErrors = cms.vdouble(18.5,0.217,0.00156,6.61e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(8406,240.8,1.4,-0.003453),
ParErrors = cms.vdouble(63.9,0.748,0.00538,2.28e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_BscHaloBeam1Inner'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.022e+07,-4541,36.58,-0.09288),
ParErrors = cms.vdouble(2.04e+04,239,1.72,0.00728),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG8'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2303,81.79,0.3757,-0.0009311),
ParErrors = cms.vdouble(19.2,0.225,0.00161,6.85e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG10'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1200,52.07,0.1942,-0.0004856),
ParErrors = cms.vdouble(11.3,0.133,0.000953,4.04e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG15'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(392.7,24.23,0.06064,-0.0001536),
ParErrors = cms.vdouble(4.91,0.0574,0.000413,1.75e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMuOpen'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1933,119.8,0.2037,-0.0004946),
ParErrors = cms.vdouble(57.3,0.67,0.00482,2.04e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_ETT60'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.988e+04,-864.9,32.92,-0.04586),
ParErrors = cms.vdouble(773,9.04,0.065,0.000276),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleForJet10U_EtaOpp'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1695,44.09,-0.373,0.001375),
ParErrors = cms.vdouble(5.58,0.0653,0.00047,1.99e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG12'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(704.9,37.5,0.1125,-0.0002837),
ParErrors = cms.vdouble(7.72,0.0903,0.000649,2.75e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleTauJet30U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(99.06,6.729,0.01584,-3.788e-05),
ParErrors = cms.vdouble(1.63,0.019,0.000137,5.8e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTM20'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1095,67.39,-0.2105,0.001239),
ParErrors = cms.vdouble(13,0.153,0.0011,4.65e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleHfRingEtSumsRing1_4'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-7917,382.5,12.52,0.002046),
ParErrors = cms.vdouble(636,7.44,0.0535,0.000227),
),
cms.PSet(
AlgoName = cms.string ('L1_Bsc2Plus_BptxPlus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2525,124.8,-1.749,0.00579),
ParErrors = cms.vdouble(40.8,0.489,0.00383,1.81e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_BscSplashBeam1'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(9.969e+05,2.36e+04,-83.64,0.0667),
ParErrors = cms.vdouble(1.58e+03,18.5,0.133,0.000565),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleTauJet4U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-1.739e+06,5.78e+04,-506.6,1.663),
ParErrors = cms.vdouble(609,7.07,0.0665,0.000508),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleEG20'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(184.9,13.67,0.02738,-6.972e-05),
ParErrors = cms.vdouble(2.87,0.0336,0.000241,1.02e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu3_Jet6U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(609.2,1.679,0.1762,-0.0002612),
ParErrors = cms.vdouble(7.71,0.0902,0.000648,2.75e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleCenJet4U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1597,48.25,0.4401,-0.0002804),
ParErrors = cms.vdouble(21.1,0.247,0.00178,7.54e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_BscMinBiasThreshold2'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(6.827e+05,2.802e+04,-108,0.1318),
ParErrors = cms.vdouble(1.99e+03,23.3,0.168,0.000711),
),
cms.PSet(
AlgoName = cms.string ('L1_BscMinBiasThreshold1'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.416e+06,2.846e+04,-149.2,0.2796),
ParErrors = cms.vdouble(5.33e+03,62.4,0.448,0.0019),
),
cms.PSet(
AlgoName = cms.string ('L1_ZdcTightVertex'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.018e+07,-3601,29.74,-0.07661),
ParErrors = cms.vdouble(2.04e+04,239,1.72,0.00728),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM70'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-0.9719,1.967,-0.0004967,5.439e-07),
ParErrors = cms.vdouble(0.585,0.00685,4.92e-05,2.09e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT50'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-694.3,31.61,-0.1548,0.0006341),
ParErrors = cms.vdouble(4.46,0.0522,0.000375,1.59e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleTauJet20U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(421.7,18.68,0.07635,-0.000167),
ParErrors = cms.vdouble(4.73,0.0554,0.000398,1.69e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet6U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(3.068e+04,1765,12.97,-0.01737),
ParErrors = cms.vdouble(568,6.64,0.0478,0.000203),
),
cms.PSet(
AlgoName = cms.string ('L1_ZeroBias'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(3.927e+06,-376.1,3.088,-0.007953),
ParErrors = cms.vdouble(1.99e+03,23.3,0.168,0.000711),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu7'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(166.3,9.15,0.01136,-2.841e-05),
ParErrors = cms.vdouble(3.15,0.0368,0.000265,1.12e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet10U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(751.7,457,0.9393,0.002868),
ParErrors = cms.vdouble(125,1.46,0.0105,4.44e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_BptxMinus_NotBptxPlus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2.257e+05,-21.58,0.1792,-0.0004666),
ParErrors = cms.vdouble(124,1.45,0.0105,4.43e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_BptxPlus_NotBptxMinus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(2.257e+05,-21.58,0.1792,-0.0004666),
ParErrors = cms.vdouble(124,1.45,0.0105,4.43e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu3_EG5'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(142.7,0.5189,0.02901,-5.707e-05),
ParErrors = cms.vdouble(1.29,0.0151,0.000108,4.6e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_BscHaloBeam2Outer'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.022e+07,-4541,36.58,-0.09288),
ParErrors = cms.vdouble(2.04e+04,239,1.72,0.00728),
),
cms.PSet(
AlgoName = cms.string ('L1_Bsc2Minus_BptxMinus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(3.687e+04,39.65,-3.653,0.01307),
ParErrors = cms.vdouble(114,1.35,0.0103,4.6e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_BscMinBiasOR_BptxPlusANDMinus'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.754e+06,2.262e+04,-95.08,0.133),
ParErrors = cms.vdouble(2.05e+03,24,0.172,0.00073),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleHfRingEtSumsRing2_P4N4'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-3851,-9.252,6.32,0.01293),
ParErrors = cms.vdouble(351,4.11,0.0295,0.000125),
),
cms.PSet(
AlgoName = cms.string ('L1_ETT140'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-8789,235.6,-2.173,0.01018),
ParErrors = cms.vdouble(49.6,0.581,0.00417,1.77e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM12'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-901.1,184.3,1.3,0.0008738),
ParErrors = cms.vdouble(82.1,0.961,0.00691,2.93e-05),
),
cms.PSet(
AlgoName = cms.string ('L1_Mu3_Jet10U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(118.5,2.611,0.03228,-9.893e-07),
ParErrors = cms.vdouble(2.63,0.0307,0.000221,9.36e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet40U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(53.71,5.838,0.009583,-2.059e-05),
ParErrors = cms.vdouble(1.4,0.0164,0.000118,5e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG10'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1133,50.58,0.1828,-0.0004579),
ParErrors = cms.vdouble(10.8,0.127,0.000912,3.87e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG12'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(667.4,36.02,0.1059,-0.0002676),
ParErrors = cms.vdouble(7.36,0.0861,0.000619,2.63e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_QuadJet8U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-4783,131.3,-1.066,0.004608),
ParErrors = cms.vdouble(20.6,0.241,0.00173,7.35e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleIsoEG15'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(352.5,23.51,0.05405,-0.0001376),
ParErrors = cms.vdouble(4.68,0.0547,0.000393,1.67e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_ZdcLooseVertex'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(4.018e+07,-3601,29.74,-0.07661),
ParErrors = cms.vdouble(2.04e+04,239,1.72,0.00728),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM20'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-290.3,27.6,-0.02299,0.0003901),
ParErrors = cms.vdouble(7.02,0.0822,0.000591,2.5e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleMu20'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(66.31,1.389,0.006441,-1.602e-05),
ParErrors = cms.vdouble(1.07,0.0126,9.03e-05,3.83e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_TripleJet14U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-81.86,5.326,-0.01481,8.236e-05),
ParErrors = cms.vdouble(1.17,0.0136,9.81e-05,4.16e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_HTT100'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-92.63,5.696,-0.02158,7.514e-05),
ParErrors = cms.vdouble(0.922,0.0108,7.76e-05,3.29e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_ZdcPlusOverThreshold'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(1.527e+07,3.147e+05,-1994,4.736),
ParErrors = cms.vdouble(5.88e+05,6.89e+03,49.8,0.212),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMuOpen'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(143.3,10.67,0.01707,-3.801e-05),
ParErrors = cms.vdouble(6.5,0.076,0.000546,2.32e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet20U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(483.1,39.11,0.108,-0.000124),
ParErrors = cms.vdouble(8.73,0.102,0.000734,3.11e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_BscHighMultiplicity'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-2.667e+05,6071,-30.79,0.06095),
ParErrors = cms.vdouble(3.05e+03,36.4,0.279,0.00133),
),
cms.PSet(
AlgoName = cms.string ('L1_DoubleMu3'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-64.07,8.91,-0.01587,4.121e-05),
ParErrors = cms.vdouble(3.38,0.0396,0.000284,1.21e-06),
),
cms.PSet(
AlgoName = cms.string ('L1_ETM30'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(-11.88,7.299,-0.002334,1.662e-05),
ParErrors = cms.vdouble(1.64,0.0192,0.000138,5.87e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet60U'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(28.08,2.228,0.004227,-1.003e-05),
ParErrors = cms.vdouble(0.731,0.00855,6.15e-05,2.61e-07),
),
cms.PSet(
AlgoName = cms.string ('L1_SingleJet10U_NotBptxOR'),
TemplateFunction = cms.string ('[0]/x+[1]+[2]*x+[3]*x*x'),
Parameters = cms.vdouble(94.22,23.63,0.001337,-1.093e-05),
ParErrors = cms.vdouble(7.83,0.0917,0.000659,2.79e-06),
),
)
RateParams = RateParams_2012
|
fe5b9955facbf921b2f9122ce3a327fe3aabc7bd
|
2f25acbc334a75ef32e67b62062e5fa9e3eec341
|
/zaqar/storage/swift/messages.py
|
a40e1de31ba58219f887b89a83aadf13e7edf309
|
[
"Apache-2.0"
] |
permissive
|
openstack/zaqar
|
123f12b157b1c3861bc455867f5413bfe27f49c7
|
169d917c3e3eaec54eeeb72859df6e4c64ef00da
|
refs/heads/master
| 2023-08-30T02:14:11.448834
| 2023-08-21T18:54:14
| 2023-08-21T18:54:14
| 12,833,284
| 102
| 46
| null | 2015-02-27T13:28:05
| 2013-09-14T17:40:21
|
Python
|
UTF-8
|
Python
| false
| false
| 18,549
|
py
|
messages.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import functools
import uuid
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import swiftclient
from zaqar.common import decorators
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage.swift import utils
from zaqar.storage import utils as s_utils
class MessageController(storage.Message):
"""Implements message resource operations with swift backend
Messages are scoped by project + queue.
message -> Swift mapping:
+--------------+-----------------------------------------+
| Attribute | Storage location |
+--------------+-----------------------------------------+
| Msg UUID | Object name |
+--------------+-----------------------------------------+
| Queue Name | Container name prefix |
+--------------+-----------------------------------------+
| Project name | Container name prefix |
+--------------+-----------------------------------------+
| Created time | Object Creation Time |
+--------------+-----------------------------------------+
| Msg Body | Object content 'body' |
+--------------+-----------------------------------------+
| Client ID | Object header 'ClientID' |
+--------------+-----------------------------------------+
| Claim ID | Object content 'claim_id' |
+--------------+-----------------------------------------+
| Delay Expires| Object content 'delay_expires' |
+--------------+-----------------------------------------+
| Expires | Object Delete-After header |
    +--------------+-----------------------------------------+
    | Checksum     | Object content 'body' checksum          |
    +--------------+-----------------------------------------+
"""
def __init__(self, *args, **kwargs):
super(MessageController, self).__init__(*args, **kwargs)
self._client = self.driver.connection
@decorators.lazy_property(write=False)
def _queue_ctrl(self):
return self.driver.queue_controller
def _delete_queue_messages(self, queue, project, pipe):
"""Method to remove all the messages belonging to a queue.
Will be referenced from the QueueController.
The pipe to execute deletion will be passed from the QueueController
executing the operation.
"""
container = utils._message_container(queue, project)
remaining = True
key = ''
while remaining:
headers, objects = self._client.get_container(container,
limit=1000,
marker=key)
if not objects:
return
remaining = len(objects) == 1000
key = objects[-1]['name']
for o in objects:
try:
self._client.delete_object(container, o['name'])
except swiftclient.ClientException as exc:
if exc.http_status == 404:
continue
raise
def _list(self, queue, project=None, marker=None,
limit=storage.DEFAULT_MESSAGES_PER_PAGE,
echo=False, client_uuid=None,
include_claimed=False, include_delayed=False,
sort=1):
"""List messages in the queue, oldest first(ish)
        Time ordering and message inclusion in lists are soft: there is no
        global order, and times are based on the UTC time of the zaqar-api
        server on which the message was created.
Here be consistency dragons.
"""
if not self._queue_ctrl.exists(queue, project):
raise errors.QueueDoesNotExist(queue, project)
client = self._client
container = utils._message_container(queue, project)
query_string = None
if sort == -1:
query_string = 'reverse=on'
try:
_, objects = client.get_container(
container,
marker=marker,
# list 2x the objects because some listing items may have
# expired
limit=limit * 2,
query_string=query_string)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise errors.QueueDoesNotExist(queue, project)
raise
def is_claimed(msg, headers):
if include_claimed or msg['claim_id'] is None:
return False
claim_obj = self.driver.claim_controller._get(
queue, msg['claim_id'], project)
return claim_obj is not None and claim_obj['ttl'] > 0
def is_delayed(msg, headers):
if include_delayed:
return False
now = timeutils.utcnow_ts()
return msg.get('delay_expires', 0) > now
def is_echo(msg, headers):
if echo:
return False
return headers['x-object-meta-clientid'] == str(client_uuid)
filters = [
is_echo,
is_claimed,
is_delayed,
]
marker = {}
get_object = functools.partial(client.get_object, container)
list_objects = functools.partial(client.get_container, container,
limit=limit * 2,
query_string=query_string)
yield utils._filter_messages(objects, filters, marker, get_object,
list_objects, limit=limit)
yield marker and marker['next']
def list(self, queue, project=None, marker=None,
limit=storage.DEFAULT_MESSAGES_PER_PAGE,
echo=False, client_uuid=None,
include_claimed=False, include_delayed=False,):
return self._list(queue, project, marker, limit, echo,
client_uuid, include_claimed, include_delayed)
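    # Consumption sketch (hypothetical caller code, not part of the driver):
    # the cursor returned by list() yields the filtered page of messages
    # first and the next-page marker second, e.g.
    #   cursor = message_controller.list('my-queue', project='my-project')
    #   messages = list(next(cursor))
    #   next_marker = next(cursor)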
def first(self, queue, project=None, sort=1):
if sort not in (1, -1):
raise ValueError(u'sort must be either 1 (ascending) '
u'or -1 (descending)')
cursor = self._list(queue, project, limit=1, sort=sort)
try:
message = next(next(cursor))
except StopIteration:
raise errors.QueueIsEmpty(queue, project)
return message
def get(self, queue, message_id, project=None):
return self._get(queue, message_id, project)
def _get(self, queue, message_id, project=None, check_queue=True):
if check_queue and not self._queue_ctrl.exists(queue, project):
raise errors.QueueDoesNotExist(queue, project)
now = timeutils.utcnow_ts(True)
headers, msg = self._find_message(queue, message_id, project)
return utils._message_to_json(message_id, msg, headers, now)
def _find_message(self, queue, message_id, project):
try:
return self._client.get_object(
utils._message_container(queue, project), message_id)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise errors.MessageDoesNotExist(message_id, queue, project)
else:
raise
def bulk_delete(self, queue, message_ids, project=None, claim_ids=None):
for message_id in message_ids:
try:
if claim_ids:
msg = self._get(queue, message_id, project)
if not msg['claim_id']:
raise errors.MessageNotClaimed(message_id)
if msg['claim_id'] not in claim_ids:
raise errors.ClaimDoesNotMatch(msg['claim_id'],
queue, project)
self._delete(queue, message_id, project)
except errors.MessageDoesNotExist:
pass
def bulk_get(self, queue, message_ids, project=None):
if not self._queue_ctrl.exists(queue, project):
return
for id in message_ids:
try:
yield self._get(queue, id, project, check_queue=False)
except errors.MessageDoesNotExist:
pass
def post(self, queue, messages, client_uuid, project=None):
# TODO(flwang): It would be nice if we can create a middleware in Swift
# to accept a json list so that Zaqar can create objects in bulk.
return [self._create_msg(queue, m, client_uuid, project)
for m in messages]
def _create_msg(self, queue, msg, client_uuid, project):
slug = str(uuid.uuid1())
now = timeutils.utcnow_ts()
message = {'body': msg.get('body', {}), 'claim_id': None,
'ttl': msg['ttl'], 'claim_count': 0,
'delay_expires': now + msg.get('delay', 0)}
if self.driver.conf.enable_checksum:
message['checksum'] = s_utils.get_checksum(msg.get('body', None))
contents = jsonutils.dumps(message)
utils._put_or_create_container(
self._client,
utils._message_container(queue, project),
slug,
contents=contents,
content_type='application/json',
headers={
'x-object-meta-clientid': str(client_uuid),
'x-delete-after': msg['ttl']})
return slug
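    # Illustrative sketch (hypothetical values) of the object written by
    # _create_msg() above, following the mapping in the class docstring:
    #   object name:    str(uuid.uuid1()), e.g. '9e7c...-...'
    #   object headers: {'x-object-meta-clientid': '<client uuid>',
    #                    'x-delete-after': <ttl>}
    #   object content: '{"body": {...}, "claim_id": null, "ttl": 300,
    #                     "claim_count": 0, "delay_expires": <timestamp>}'
    #                   plus a 'checksum' entry when enable_checksum is set.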
def delete(self, queue, message_id, project=None, claim=None):
claim_ctrl = self.driver.claim_controller
try:
msg = self._get(queue, message_id, project)
except (errors.QueueDoesNotExist, errors.MessageDoesNotExist):
return
if claim is None:
if msg['claim_id']:
claim_obj = claim_ctrl._get(queue, msg['claim_id'], project)
if claim_obj is not None and claim_obj['ttl'] > 0:
raise errors.MessageIsClaimed(message_id)
else:
# Check if the claim does exist
claim_ctrl._exists(queue, claim, project)
if not msg['claim_id']:
raise errors.MessageNotClaimed(message_id)
elif msg['claim_id'] != claim:
raise errors.MessageNotClaimedBy(message_id, claim)
self._delete(queue, message_id, project)
def _delete(self, queue, message_id, project=None):
try:
self._client.delete_object(
utils._message_container(queue, project), message_id)
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
def pop(self, queue, limit, project=None):
# Pop is implemented as a chain of the following operations:
# 1. Create a claim.
# 2. Delete the messages claimed.
# 3. Delete the claim.
claim_ctrl = self.driver.claim_controller
claim_id, messages = claim_ctrl.create(queue, dict(ttl=1, grace=0),
project, limit=limit)
message_ids = [message['id'] for message in messages]
self.bulk_delete(queue, message_ids, project)
return messages
class MessageQueueHandler(object):
def __init__(self, driver, control_driver):
self.driver = driver
self._client = self.driver.connection
self._queue_ctrl = self.driver.queue_controller
self._message_ctrl = self.driver.message_controller
self._claim_ctrl = self.driver.claim_controller
def create(self, name, metadata=None, project=None):
self._client.put_container(utils._message_container(name, project))
def delete(self, name, project=None):
for container in [utils._message_container(name, project),
utils._claim_container(name, project)]:
try:
headers, objects = self._client.get_container(container)
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
else:
for obj in objects:
try:
self._client.delete_object(container, obj['name'])
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
try:
self._client.delete_container(container)
except swiftclient.ClientException as exc:
if exc.http_status not in (404, 409):
raise
def stats(self, name, project=None):
if not self._queue_ctrl.exists(name, project=project):
raise errors.QueueDoesNotExist(name, project)
total = 0
claimed = 0
container = utils._message_container(name, project)
try:
_, objects = self._client.get_container(container)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise errors.QueueIsEmpty(name, project)
newest = None
oldest = None
now = timeutils.utcnow_ts(True)
for obj in objects:
try:
headers = self._client.head_object(container, obj['name'])
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
else:
created = float(headers['x-timestamp'])
created_iso = datetime.datetime.utcfromtimestamp(
created).strftime('%Y-%m-%dT%H:%M:%SZ')
newest = {
'id': obj['name'],
'age': now - created,
'created': created_iso}
if oldest is None:
oldest = copy.deepcopy(newest)
total += 1
if headers.get('x-object-meta-claimid'):
claimed += 1
msg_stats = {
'claimed': claimed,
'free': total - claimed,
'total': total,
}
if newest is not None:
msg_stats['newest'] = newest
msg_stats['oldest'] = oldest
return {'messages': msg_stats}
def exists(self, queue, project=None):
try:
self._client.head_container(utils._message_container(queue,
project))
except swiftclient.ClientException as exc:
if exc.http_status == 404:
return False
raise
else:
return True
class MessageTopicHandler(object):
def __init__(self, driver, control_driver):
self.driver = driver
self._client = self.driver.connection
self._topic_ctrl = self.driver.topic_controller
self._message_ctrl = self.driver.message_controller
def create(self, name, metadata=None, project=None):
self._client.put_container(utils._message_container(name, project))
def delete(self, name, project=None):
for container in [utils._message_container(name, project)]:
try:
headers, objects = self._client.get_container(container)
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
else:
for obj in objects:
try:
self._client.delete_object(container, obj['name'])
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
try:
self._client.delete_container(container)
except swiftclient.ClientException as exc:
if exc.http_status not in (404, 409):
raise
def stats(self, name, project=None):
if not self._topic_ctrl.exists(name, project=project):
raise errors.TopicDoesNotExist(name, project)
total = 0
container = utils._message_container(name, project)
try:
_, objects = self._client.get_container(container)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise errors.QueueIsEmpty(name, project)
newest = None
oldest = None
now = timeutils.utcnow_ts(True)
for obj in objects:
try:
headers = self._client.head_object(container, obj['name'])
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
else:
created = float(headers['x-timestamp'])
created_iso = datetime.datetime.utcfromtimestamp(
created).strftime('%Y-%m-%dT%H:%M:%SZ')
newest = {
'id': obj['name'],
'age': now - created,
'created': created_iso}
if oldest is None:
oldest = copy.deepcopy(newest)
total += 1
msg_stats = {
'total': total,
}
if newest is not None:
msg_stats['newest'] = newest
msg_stats['oldest'] = oldest
return {'messages': msg_stats}
def exists(self, topic, project=None):
try:
self._client.head_container(utils._message_container(topic,
project))
except swiftclient.ClientException as exc:
if exc.http_status == 404:
return False
raise
else:
return True
|
2c87fe5af06b18facb48da46655c188e38edf041
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/PAMTRI/PoseEstNet/trans.py
|
cb5a4b4abfa052ec6a9318f83ece9c796355b330
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,915
|
py
|
trans.py
|
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
########################## transform veri dataset ##########################
use a trained PoseEstNet checkpoint on the VeRi dataset to produce the MultiTaskNet dataset:
python trans.py --cfg config.yaml --ckpt_path Your.ckpt --data_dir datapath
"""
import os
import argparse
import mindspore.dataset as ds
import mindspore.dataset.vision as vision
from mindspore import context
from mindspore.dataset.transforms.transforms import Compose
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.model import get_pose_net
from src.config import cfg, update_config
from src.utils.function import output_preds
from src.dataset import VeRiTransDataset
parser = argparse.ArgumentParser(description='Transform veri dataset')
parser.add_argument('--cfg', required=True, type=str)
parser.add_argument('--ckpt_path', type=str, default='')
parser.add_argument('--data_dir', type=str, default='')
parser.add_argument('--device_target', type=str, default="Ascend")
args = parser.parse_args()
if __name__ == '__main__':
update_config(cfg, args)
target = args.device_target
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False, device_id=device_id)
test_data = VeRiTransDataset(cfg, args.data_dir, 'test')
query_data = VeRiTransDataset(cfg, args.data_dir, 'query')
train_data = VeRiTransDataset(cfg, args.data_dir, 'train')
test_dataloader = ds.GeneratorDataset(test_data, column_names=['input', 'center', 'scale'],
num_parallel_workers=1, shuffle=False, num_shards=1, shard_id=0)
query_dataloader = ds.GeneratorDataset(query_data, column_names=['input', 'center', 'scale'],
num_parallel_workers=1, shuffle=False, num_shards=1, shard_id=0)
train_dataloader = ds.GeneratorDataset(train_data, column_names=['input', 'center', 'scale'],
num_parallel_workers=1, shuffle=False, num_shards=1, shard_id=0)
trans = Compose([
vision.ToTensor(),
vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
])
test_dataloader = test_dataloader.map(operations=trans, input_columns="input", num_parallel_workers=1)
test_dataloader = test_dataloader.batch(batch_size=32, drop_remainder=False, num_parallel_workers=1)
query_dataloader = query_dataloader.map(operations=trans, input_columns="input", num_parallel_workers=1)
query_dataloader = query_dataloader.batch(batch_size=32, drop_remainder=False, num_parallel_workers=1)
train_dataloader = train_dataloader.map(operations=trans, input_columns="input", num_parallel_workers=1)
train_dataloader = train_dataloader.batch(batch_size=32, drop_remainder=False, num_parallel_workers=1)
network = get_pose_net(cfg)
param_dict = load_checkpoint(args.ckpt_path)
load_param_into_net(network, param_dict)
output_preds(cfg, test_dataloader, test_data, network, args.data_dir, 'test', args.data_dir)
output_preds(cfg, query_dataloader, query_data, network, args.data_dir, 'query', args.data_dir)
output_preds(cfg, train_dataloader, train_data, network, args.data_dir, 'train', args.data_dir)
|
164b0e0de6645f3a7ddaa9d889d73f8536aea885
|
ad61cc119a42abfd3d64224a753817ae0f9ba058
|
/tests/unit/customizations/codeartifact/__init__.py
|
a25bae32ceeb89c08fcdfa6ce91a00bb42bfbb0d
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-cli
|
30b0e5b0fb6d736f1540990955f0a7351ee7a908
|
147d16dfdb72dc9cf362b676a57e46a49375afbd
|
refs/heads/develop
| 2023-09-03T19:52:07.955543
| 2023-09-01T20:37:50
| 2023-09-01T20:37:50
| 6,780,767
| 13,038
| 4,107
|
NOASSERTION
| 2023-09-13T19:48:11
| 2012-11-20T16:07:36
|
Python
|
UTF-8
|
Python
| false
| false
| 948
|
py
|
__init__.py
|
from awscli.testutils import unittest
from awscli.testutils import mock
from awscli.customizations.codeartifact import register_codeartifact_commands
from awscli.customizations.codeartifact import inject_commands
from awscli.customizations.codeartifact.login import CodeArtifactLogin
class TestRegisterCodeArtifactCommands(unittest.TestCase):
def test_register_codeartifact_commands(self):
event_emitter = mock.Mock()
register_codeartifact_commands(event_emitter)
event_emitter.register.assert_called_once_with(
'building-command-table.codeartifact', inject_commands
)
class TestInjectCommands(unittest.TestCase):
def test_inject_commands(self):
command_table = {}
session = mock.Mock()
inject_commands(command_table, session)
self.assertIn('login', command_table)
self.assertIsInstance(
command_table['login'], CodeArtifactLogin
)
|
a7ff05cab2fc0ad5b8c5c38d3afc6161e8877a8f
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/cloudservice/azext_cloudservice/generated/_params.py
|
9c63faa5f3dcffd9de3ab425b54ef48d2dd61778
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 13,874
|
py
|
_params.py
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
tags_type,
get_three_state_flag,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group,
validate_file_or_dict
)
def load_arguments(self, _):
    # All "cloud_service_name" arguments miss help text
with self.argument_context('cloud-service role-instance list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.')
with self.argument_context('cloud-service role-instance show') as c:
c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service role-instance delete') as c:
c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service role-instance rebuild') as c:
c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service role-instance reimage') as c:
c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service role-instance restart') as c:
c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service role-instance show-instance-view') as c:
c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service role-instance show-remote-desktop-file') as c:
c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service role-instance wait') as c:
c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service role list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.')
with self.argument_context('cloud-service role show') as c:
c.argument('role_name', type=str, help='Name of the role.', id_part='child_name_1')
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
with self.argument_context('cloud-service list') as c:
c.argument('resource_group_name', resource_group_name_type)
with self.argument_context('cloud-service show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
with self.argument_context('cloud-service create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.')
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('package_url', type=str, help='Specifies a URL that refers to the location of the service package '
'in the Blob service. The service package URL can be Shared Access Signature (SAS) URI from any '
'storage account. This is a write-only property and is not returned in GET calls.')
c.argument('configuration', type=str, help='Specifies the XML service configuration (.cscfg) for the cloud '
'service.')
c.argument('configuration_url', type=str, help='Specifies a URL that refers to the location of the service '
'configuration in the Blob service. The service package URL can be Shared Access Signature (SAS) '
'URI from any storage account. This is a write-only property and is not returned in GET calls.')
c.argument('start_cloud_service', arg_type=get_three_state_flag(), help='(Optional) Indicates whether to start '
'the cloud service immediately after it is created. The default value is `true`. If false, the '
'service model is still deployed, but the code is not run immediately. Instead, the service is '
'PoweredOff until you call Start, at which time the service will be started. A deployed service '
'still incurs charges, even if it is poweredoff.')
c.argument('upgrade_mode', arg_type=get_enum_type(['Auto', 'Manual', 'Simultaneous']), help='Update mode for '
'the cloud service. Role instances are allocated to update domains when the service is deployed. '
'Updates can be initiated manually in each update domain or initiated automatically in all update '
'domains. Possible Values are <br /><br />**Auto**<br /><br />**Manual** <br /><br '
'/>**Simultaneous**<br /><br /> If not specified, the default value is Auto. If set to Manual, PUT '
'UpdateDomain must be called to apply the update. If set to Auto, the update is automatically '
'applied to each update domain in sequence.')
c.argument('extensions', type=validate_file_or_dict, help='List of extensions for the cloud service. Expected '
'value: json-string/@json-file.', arg_group='Extension Profile')
c.argument('load_balancer_configurations', type=validate_file_or_dict, help='The list of load balancer '
'configurations for the cloud service. Expected value: json-string/@json-file.', arg_group='Network '
'Profile')
c.argument('id_', options_list=['--id'], type=str, help='Resource Id', arg_group='Network Profile Swappable '
'Cloud Service')
c.argument('secrets', type=validate_file_or_dict, help='Specifies set of certificates that should be installed '
'onto the role instances. Expected value: json-string/@json-file.', arg_group='Os Profile')
c.argument('roles', type=validate_file_or_dict, help='List of roles for the cloud service. Expected value: '
'json-string/@json-file.', arg_group='Role Profile')
with self.argument_context('cloud-service update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
c.argument('tags', tags_type)
with self.argument_context('cloud-service delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
with self.argument_context('cloud-service delete-instance') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
c.argument('role_instances', nargs='+', help='List of cloud service role instance names. Value of \'*\' will '
'signify all role instances of the cloud service.')
with self.argument_context('cloud-service power-off') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
with self.argument_context('cloud-service rebuild') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
c.argument('role_instances', nargs='+', help='List of cloud service role instance names. Value of \'*\' will '
'signify all role instances of the cloud service.')
with self.argument_context('cloud-service reimage') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
c.argument('role_instances', nargs='+', help='List of cloud service role instance names. Value of \'*\' will '
'signify all role instances of the cloud service.')
with self.argument_context('cloud-service restart') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
c.argument('role_instances', nargs='+', help='List of cloud service role instance names. Value of \'*\' will '
'signify all role instances of the cloud service.')
with self.argument_context('cloud-service show-instance-view') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
with self.argument_context('cloud-service start') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
with self.argument_context('cloud-service wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
'of the cloud service.', id_part='name')
with self.argument_context('cloud-service update-domain list-update-domain') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Name of the cloud service.')
with self.argument_context('cloud-service update-domain show-update-domain') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Name of the cloud service.', id_part='name')
c.argument('update_domain', type=int, help='Specifies an integer value that identifies the update domain. '
'Update domains are identified with a zero-based index: the first update domain has an ID of 0, the '
'second has an ID of 1, and so on.', id_part='child_name_1')
with self.argument_context('cloud-service update-domain walk-update-domain') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cloud_service_name', type=str, help='Name of the cloud service.', id_part='name')
c.argument('update_domain', type=int, help='Specifies an integer value that identifies the update domain. '
'Update domains are identified with a zero-based index: the first update domain has an ID of 0, the '
'second has an ID of 1, and so on.', id_part='child_name_1')
|
6a8e1c21f094d454371aef32381d3bce7f2e70c4
|
a21ccc8ced1b57b351e3772e3939714e330de2d5
|
/src/related/__init__.py
|
c705ee24e769089c9e9abd6f6252bcc4f36d1b0a
|
[
"MIT"
] |
permissive
|
genomoncology/related
|
f7d4310fd5776441afd045a96f07f9dad9797a6c
|
2f3db6b07f5515792370d97f790cd46ec0882a9d
|
refs/heads/master
| 2023-01-12T08:46:16.068921
| 2022-08-05T13:34:47
| 2022-08-05T13:34:47
| 92,290,249
| 208
| 18
|
MIT
| 2022-12-27T17:03:18
| 2017-05-24T12:31:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
from .decorators import (
mutable,
immutable,
serializer,
)
from .types import (
ImmutableDict,
TypedSequence,
TypedMapping,
TypedSet,
)
from .fields import (
BooleanField,
ChildField,
DateField,
DateTimeField,
TimeField,
FloatField,
IntegerField,
MappingField,
RegexField,
SequenceField,
SetField,
StringField,
URLField,
UUIDField,
DecimalField,
)
from .functions import (
from_json,
from_yaml,
is_model,
to_dict,
to_json,
to_model,
to_yaml,
)
from . import dispatchers # noqa F401
__all__ = [
# decorators.py
"mutable",
"immutable",
"serializer",
# types.py
"ImmutableDict",
"TypedSequence",
"TypedMapping",
"TypedSet",
# fields.py
"BooleanField",
"ChildField",
"DateField",
"DateTimeField",
"TimeField",
"FloatField",
"IntegerField",
"MappingField",
"RegexField",
"SetField",
"StringField",
"SequenceField",
"URLField",
"UUIDField",
"DecimalField",
# functions.py
"from_json",
"from_yaml",
"is_model",
"to_dict",
"to_json",
"to_model",
"to_yaml",
]
__author__ = """Ian Maurer"""
__email__ = 'ian@genomoncology.com'
__version__ = '0.7.2'
__uri__ = "http://www.github.com/genomoncology/related"
__copyright__ = "Copyright (c) 2017 genomoncology.com"
__description__ = "Related: Straightforward nested object models in Python"
__doc__ = __description__ + " <" + __uri__ + ">"
__license__ = "MIT"
__title__ = "related"
|
59a1c576c78135173064b3584e071dbfdc751468
|
9734c93c86c982b1ce046340bac9e53645b261b8
|
/tests/parsers/esedb_plugins/msie_webcache.py
|
772e105736b7e8fb9f410c82b31f80b8d2ab3645
|
[
"Apache-2.0"
] |
permissive
|
log2timeline/plaso
|
cd72dd407d6c5627506c14f58cb8f6a6926aa808
|
d6022f8cfebfddf2d08ab2d300a41b61f3349933
|
refs/heads/main
| 2023-09-02T08:43:48.241198
| 2023-08-19T07:28:12
| 2023-08-19T07:28:12
| 23,812,315
| 1,506
| 421
|
Apache-2.0
| 2023-09-04T08:24:53
| 2014-09-08T23:29:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,391
|
py
|
msie_webcache.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Internet Explorer WebCache database."""
import unittest
from plaso.parsers.esedb_plugins import msie_webcache
from tests.parsers.esedb_plugins import test_lib
class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase):
"""Tests for the MSIE WebCache ESE database plugin."""
# pylint: disable=protected-access
def testConvertHeadersValues(self):
"""Tests the _ConvertHeadersValues function."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
binary_value = (
b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n'
b'X-XSS-Protection: 1; mode=block\r\n'
b'Alternate-Protocol: 80:quic\r\n\r\n')
expected_headers_value = (
'[HTTP/1.1 200 OK; Content-Type: image/png; '
'X-Content-Type-Options: nosniff; Content-Length: 2759; '
'X-XSS-Protection: 1; mode=block; '
'Alternate-Protocol: 80:quic]')
headers_value = plugin._ConvertHeadersValues(binary_value)
self.assertEqual(headers_value, expected_headers_value)
def testProcessOnDatabaseWithPartitionsTable(self):
"""Tests the Process function on database with a Partitions table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 341)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'access_time': '2014-05-12T07:30:25.4861987+00:00',
'container_identifier': 1,
'data_type': 'msie:webcache:containers',
'directory': (
'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\'
'INetCache\\IE\\'),
'name': 'Content',
'set_identifier': 0}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
self.CheckEventData(event_data, expected_event_values)
def testProcessOnDatabaseWithPartitionsExTable(self):
"""Tests the Process function on database with a PartitionsEx table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(
['PartitionsEx-WebCacheV01.dat'], plugin)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 1143)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 3)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'access_count': 5,
'cache_identifier': 0,
'cached_file_size': 726,
'cached_filename': 'b83d57c0[1].svg',
'container_identifier': 14,
'data_type': 'msie:webcache:container',
'entry_identifier': 63,
'modification_time': '2019-03-20T17:22:14.0000000+00:00',
'response_headers': (
'[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; '
'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62'
'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: '
'Mon, 16 Dec 2019 20:55:28 GMT]'),
'synchronization_count': 0,
'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 211)
self.CheckEventData(event_data, expected_event_values)
def testProcessOnDatabaseWithCookiesExTable(self):
"""Tests the Process function on database with a CookiesEx table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(
['WebCacheV01_cookies.dat'], plugin)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 276)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'cookie_hash': '5b4342ed6e2b0ae16f7e2c4c',
'cookie_name': 'abid',
'cookie_value': 'fcc450d1-8674-1bd3-4074-a240cff5c5b1',
'cookie_value_raw': (
'66636334353064312d383637342d316264332d343037342d6132343063666'
'6356335623100'),
'data_type': 'msie:webcache:cookie',
'entry_identifier': 13,
'flags': 0x80082401,
'request_domain': 'com.associates-amazon' }
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 69)
self.CheckEventData(event_data, expected_event_values)
if __name__ == '__main__':
unittest.main()
|
87a205aec149ed8c4a94561b4793b6a17d0160e6
|
b3db95f1741e50140a6dd14f199cc585e3b61254
|
/tools/releasetools/merge/merge_target_files.py
|
d8f7b15a51aa4a1f72c7ce3c3e034082e0e1354a
|
[
"Apache-2.0"
] |
permissive
|
aosp-mirror/platform_build
|
58bc3f117b721b555203c04b9a2636c51dfc009d
|
ef9ba4d22bb56b0455a2d207300cf7ed18d8e5dc
|
refs/heads/main
| 2023-08-17T04:01:06.769170
| 2023-08-17T00:35:36
| 2023-08-17T00:35:36
| 65,832
| 215
| 251
| null | 2023-08-04T13:45:36
| 2008-10-21T18:19:56
|
Makefile
|
UTF-8
|
Python
| false
| false
| 23,639
|
py
|
merge_target_files.py
|
#!/usr/bin/env python
#
# Copyright (C) 2022 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""This script merges two partial target files packages.
One input package contains framework files, and the other contains vendor files.
This script produces a complete, merged target files package:
- This package can be used to generate a flashable IMG package.
See --output-img.
- This package can be used to generate an OTA package. See --output-ota.
- The merged package is checked for compatibility between the two inputs.
Usage: merge_target_files [args]
--framework-target-files framework-target-files-package
The input target files package containing framework bits. This is a zip
archive or a directory.
--framework-item-list framework-item-list-file
The optional path to a newline-separated config file of items that
are extracted as-is from the framework target files package.
--framework-misc-info-keys framework-misc-info-keys-file
The optional path to a newline-separated config file of keys to
extract from the framework META/misc_info.txt file.
--vendor-target-files vendor-target-files-package
The input target files package containing vendor bits. This is a zip
archive or a directory.
--vendor-item-list vendor-item-list-file
The optional path to a newline-separated config file of items that
are extracted as-is from the vendor target files package.
--output-target-files output-target-files-package
If provided, the output merged target files package. Also a zip archive.
--output-dir output-directory
If provided, the destination directory for saving merged files. Requires
the --output-item-list flag.
Can be provided alongside --output-target-files, or by itself.
--output-item-list output-item-list-file.
The optional path to a newline-separated config file that specifies the
file patterns to copy into the --output-dir. Required if providing
the --output-dir flag.
--output-ota output-ota-package
The output ota package. This is a zip archive. Use of this flag may
require passing the --path common flag; see common.py.
--output-img output-img-package
The output img package, suitable for use with 'fastboot update'. Use of
this flag may require passing the --path common flag; see common.py.
--output-super-empty output-super-empty-image
If provided, creates a super_empty.img file from the merged target
files package and saves it at this path.
--rebuild_recovery
Copy the recovery image used by non-A/B devices, used when
regenerating vendor images with --rebuild-sepolicy.
--allow-duplicate-apkapex-keys
If provided, duplicate APK/APEX keys are ignored and the value from the
framework is used.
--rebuild-sepolicy
If provided, rebuilds odm.img or vendor.img to include merged sepolicy
files. If odm is present then odm is preferred.
--vendor-otatools otatools.zip
If provided, use this otatools.zip when recompiling the odm or vendor
image to include sepolicy.
--keep-tmp
      Keep temporary files for debugging purposes.
The following only apply when using the VSDK to perform dexopt on vendor apps:
--framework-dexpreopt-config
      If provided, the location of framework's dexpreopt_config.zip.
--framework-dexpreopt-tools
      If provided, the location of framework's dexpreopt_tools.zip.
--vendor-dexpreopt-config
If provided, the location of vendor's dexpreopt_config.zip.
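An illustrative invocation (hypothetical archive names; the real flags are
described above):
  merge_target_files \
    --framework-target-files framework-target_files.zip \
    --vendor-target-files vendor-target_files.zip \
    --output-target-files merged-target_files.zip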
"""
import logging
import os
import shutil
import subprocess
import sys
import zipfile
import add_img_to_target_files
import build_image
import build_super_image
import common
import img_from_target_files
import merge_compatibility_checks
import merge_dexopt
import merge_meta
import merge_utils
import ota_from_target_files
from common import ExternalError
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
# Always turn on verbose logging.
OPTIONS.verbose = True
OPTIONS.framework_target_files = None
OPTIONS.framework_item_list = []
OPTIONS.framework_misc_info_keys = []
OPTIONS.vendor_target_files = None
OPTIONS.vendor_item_list = []
OPTIONS.output_target_files = None
OPTIONS.output_dir = None
OPTIONS.output_item_list = []
OPTIONS.output_ota = None
OPTIONS.output_img = None
OPTIONS.output_super_empty = None
OPTIONS.rebuild_recovery = False
# TODO(b/150582573): Remove this option.
OPTIONS.allow_duplicate_apkapex_keys = False
OPTIONS.vendor_otatools = None
OPTIONS.rebuild_sepolicy = False
OPTIONS.keep_tmp = False
OPTIONS.framework_dexpreopt_config = None
OPTIONS.framework_dexpreopt_tools = None
OPTIONS.vendor_dexpreopt_config = None
def move_only_exists(source, destination):
  """Move the file to the destination only if the source exists."""
if os.path.exists(source):
shutil.move(source, destination)
def remove_file_if_exists(file_name):
"""Remove the file if it exists and skip otherwise."""
try:
os.remove(file_name)
except FileNotFoundError:
pass
def include_extra_in_list(item_list):
"""
1. Include all `META/*` files in the item list.
To ensure that `AddImagesToTargetFiles` can still be used with vendor item
list that do not specify all of the required META/ files, those files should
be included by default. This preserves the backward compatibility of
`rebuild_image_with_sepolicy`.
2. Include `SYSTEM/build.prop` file in the item list.
To ensure that `AddImagesToTargetFiles` for GRF vendor images, can still
access SYSTEM/build.prop to pass GetPartitionFingerprint check in BuildInfo
constructor.
"""
if not item_list:
return None
return list(item_list) + ['META/*'] + ['SYSTEM/build.prop']
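# Illustrative example of include_extra_in_list (hypothetical item list):
#   include_extra_in_list(['VENDOR/*', 'ODM/*'])
#   -> ['VENDOR/*', 'ODM/*', 'META/*', 'SYSTEM/build.prop']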
def create_merged_package(temp_dir):
"""Merges two target files packages into one target files structure.
Returns:
Path to merged package under temp directory.
"""
# Extract "as is" items from the input framework and vendor partial target
# files packages directly into the output temporary directory, since these
# items do not need special case processing.
output_target_files_temp_dir = os.path.join(temp_dir, 'output')
merge_utils.CollectTargetFiles(
input_zipfile_or_dir=OPTIONS.framework_target_files,
output_dir=output_target_files_temp_dir,
item_list=OPTIONS.framework_item_list)
merge_utils.CollectTargetFiles(
input_zipfile_or_dir=OPTIONS.vendor_target_files,
output_dir=output_target_files_temp_dir,
item_list=OPTIONS.vendor_item_list)
# Perform special case processing on META/* items.
# After this function completes successfully, all the files we need to create
# the output target files package are in place.
merge_meta.MergeMetaFiles(
temp_dir=temp_dir, merged_dir=output_target_files_temp_dir)
merge_dexopt.MergeDexopt(
temp_dir=temp_dir, output_target_files_dir=output_target_files_temp_dir)
return output_target_files_temp_dir
def generate_missing_images(target_files_dir):
"""Generate any missing images from target files."""
# Regenerate IMAGES in the target directory.
add_img_args = [
'--verbose',
'--add_missing',
]
if OPTIONS.rebuild_recovery:
add_img_args.append('--rebuild_recovery')
add_img_args.append(target_files_dir)
add_img_to_target_files.main(add_img_args)
def rebuild_image_with_sepolicy(target_files_dir):
"""Rebuilds odm.img or vendor.img to include merged sepolicy files.
If odm is present then odm is preferred -- otherwise vendor is used.
"""
partition = 'vendor'
if os.path.exists(os.path.join(target_files_dir, 'ODM')):
partition = 'odm'
partition_img = '{}.img'.format(partition)
partition_map = '{}.map'.format(partition)
logger.info('Recompiling %s using the merged sepolicy files.', partition_img)
# Copy the combined SEPolicy file and framework hashes to the image that is
# being rebuilt.
def copy_selinux_file(input_path, output_filename):
input_filename = os.path.join(target_files_dir, input_path)
if not os.path.exists(input_filename):
input_filename = input_filename.replace('SYSTEM_EXT/',
'SYSTEM/system_ext/') \
.replace('PRODUCT/', 'SYSTEM/product/')
if not os.path.exists(input_filename):
logger.info('Skipping copy_selinux_file for %s', input_filename)
return
shutil.copy(
input_filename,
os.path.join(target_files_dir, partition.upper(), 'etc/selinux',
output_filename))
copy_selinux_file('META/combined_sepolicy', 'precompiled_sepolicy')
copy_selinux_file('SYSTEM/etc/selinux/plat_sepolicy_and_mapping.sha256',
'precompiled_sepolicy.plat_sepolicy_and_mapping.sha256')
copy_selinux_file(
'SYSTEM_EXT/etc/selinux/system_ext_sepolicy_and_mapping.sha256',
'precompiled_sepolicy.system_ext_sepolicy_and_mapping.sha256')
copy_selinux_file('PRODUCT/etc/selinux/product_sepolicy_and_mapping.sha256',
'precompiled_sepolicy.product_sepolicy_and_mapping.sha256')
if not OPTIONS.vendor_otatools:
# Remove the partition from the merged target-files archive. It will be
# rebuilt later automatically by generate_missing_images().
remove_file_if_exists(
os.path.join(target_files_dir, 'IMAGES', partition_img))
return
# TODO(b/192253131): Remove the need for vendor_otatools by fixing
# backwards-compatibility issues when compiling images across releases.
if not OPTIONS.vendor_target_files:
raise ValueError(
'Expected vendor_target_files if vendor_otatools is not None.')
logger.info(
'%s recompilation will be performed using the vendor otatools.zip',
partition_img)
# Unzip the vendor build's otatools.zip and target-files archive.
vendor_otatools_dir = common.MakeTempDir(
prefix='merge_target_files_vendor_otatools_')
vendor_target_files_dir = common.MakeTempDir(
prefix='merge_target_files_vendor_target_files_')
common.UnzipToDir(OPTIONS.vendor_otatools, vendor_otatools_dir)
merge_utils.CollectTargetFiles(
input_zipfile_or_dir=OPTIONS.vendor_target_files,
output_dir=vendor_target_files_dir,
item_list=include_extra_in_list(OPTIONS.vendor_item_list))
# Copy the partition contents from the merged target-files archive to the
# vendor target-files archive.
shutil.rmtree(os.path.join(vendor_target_files_dir, partition.upper()))
shutil.copytree(
os.path.join(target_files_dir, partition.upper()),
os.path.join(vendor_target_files_dir, partition.upper()),
symlinks=True)
# Delete then rebuild the partition.
remove_file_if_exists(
os.path.join(vendor_target_files_dir, 'IMAGES', partition_img))
rebuild_partition_command = [
os.path.join(vendor_otatools_dir, 'bin', 'add_img_to_target_files'),
'--verbose',
'--add_missing',
]
if OPTIONS.rebuild_recovery:
rebuild_partition_command.append('--rebuild_recovery')
rebuild_partition_command.append(vendor_target_files_dir)
logger.info('Recompiling %s: %s', partition_img,
' '.join(rebuild_partition_command))
common.RunAndCheckOutput(rebuild_partition_command, verbose=True)
# Move the newly-created image to the merged target files dir.
if not os.path.exists(os.path.join(target_files_dir, 'IMAGES')):
os.makedirs(os.path.join(target_files_dir, 'IMAGES'))
shutil.move(
os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
os.path.join(target_files_dir, 'IMAGES', partition_img))
move_only_exists(
os.path.join(vendor_target_files_dir, 'IMAGES', partition_map),
os.path.join(target_files_dir, 'IMAGES', partition_map))
def copy_recovery_file(filename):
for subdir in ('VENDOR', 'SYSTEM/vendor'):
source = os.path.join(vendor_target_files_dir, subdir, filename)
if os.path.exists(source):
dest = os.path.join(target_files_dir, subdir, filename)
shutil.copy(source, dest)
return
logger.info('Skipping copy_recovery_file for %s, file not found', filename)
if OPTIONS.rebuild_recovery:
copy_recovery_file('etc/recovery.img')
copy_recovery_file('bin/install-recovery.sh')
copy_recovery_file('recovery-from-boot.p')
def generate_super_empty_image(target_dir, output_super_empty):
"""Generates super_empty image from target package.
Args:
target_dir: Path to the target file package which contains misc_info.txt for
detailed information for super image.
output_super_empty: If provided, copies a super_empty.img file from the
target files package to this path.
"""
# Create super_empty.img using the merged misc_info.txt.
misc_info_txt = os.path.join(target_dir, 'META', 'misc_info.txt')
use_dynamic_partitions = common.LoadDictionaryFromFile(misc_info_txt).get(
'use_dynamic_partitions')
if use_dynamic_partitions != 'true' and output_super_empty:
raise ValueError(
'Building super_empty.img requires use_dynamic_partitions=true.')
elif use_dynamic_partitions == 'true':
super_empty_img = os.path.join(target_dir, 'IMAGES', 'super_empty.img')
build_super_image_args = [
misc_info_txt,
super_empty_img,
]
build_super_image.main(build_super_image_args)
# Copy super_empty.img to the user-provided output_super_empty location.
if output_super_empty:
shutil.copyfile(super_empty_img, output_super_empty)
def create_target_files_archive(output_zip, source_dir, temp_dir):
"""Creates a target_files zip archive from the input source dir.
Args:
output_zip: The name of the zip archive target files package.
    source_dir: The directory containing the target files package to be archived.
temp_dir: Path to temporary directory for any intermediate files.
"""
output_target_files_list = os.path.join(temp_dir, 'output.list')
output_target_files_meta_dir = os.path.join(source_dir, 'META')
def files_from_path(target_path, extra_args=None):
"""Gets files under the given path and return a sorted list."""
find_command = ['find', target_path] + (extra_args or [])
find_process = common.Run(
find_command, stdout=subprocess.PIPE, verbose=False)
return common.RunAndCheckOutput(['sort'],
stdin=find_process.stdout,
verbose=False)
# META content appears first in the zip. This is done by the
# standard build system for optimized extraction of those files,
# so we do the same step for merged target_files.zips here too.
meta_content = files_from_path(output_target_files_meta_dir)
other_content = files_from_path(
source_dir,
['-path', output_target_files_meta_dir, '-prune', '-o', '-print'])
with open(output_target_files_list, 'w') as f:
f.write(meta_content)
f.write(other_content)
command = [
'soong_zip',
'-d',
'-o',
os.path.abspath(output_zip),
'-C',
source_dir,
'-r',
output_target_files_list,
]
logger.info('creating %s', output_zip)
common.RunAndCheckOutput(command, verbose=True)
logger.info('finished creating %s', output_zip)
def merge_target_files(temp_dir):
"""Merges two target files packages together.
This function uses framework and vendor target files packages as input,
performs various file extractions, special case processing, and finally
creates a merged zip archive as output.
Args:
temp_dir: The name of a directory we use when we extract items from the
input target files packages, and also a scratch directory that we use for
temporary files.
"""
logger.info('starting: merge framework %s and vendor %s into output %s',
OPTIONS.framework_target_files, OPTIONS.vendor_target_files,
OPTIONS.output_target_files)
output_target_files_temp_dir = create_merged_package(temp_dir)
partition_map = common.PartitionMapFromTargetFiles(
output_target_files_temp_dir)
compatibility_errors = merge_compatibility_checks.CheckCompatibility(
target_files_dir=output_target_files_temp_dir,
partition_map=partition_map)
if compatibility_errors:
for error in compatibility_errors:
logger.error(error)
raise ExternalError(
'Found incompatibilities in the merged target files package.')
# Include the compiled policy in an image if requested.
if OPTIONS.rebuild_sepolicy:
rebuild_image_with_sepolicy(output_target_files_temp_dir)
generate_missing_images(output_target_files_temp_dir)
generate_super_empty_image(output_target_files_temp_dir,
OPTIONS.output_super_empty)
# Finally, create the output target files zip archive and/or copy the
# output items to the output target files directory.
if OPTIONS.output_dir:
merge_utils.CopyItems(output_target_files_temp_dir, OPTIONS.output_dir,
OPTIONS.output_item_list)
if not OPTIONS.output_target_files:
return
create_target_files_archive(OPTIONS.output_target_files,
output_target_files_temp_dir, temp_dir)
# Create the IMG package from the merged target files package.
if OPTIONS.output_img:
img_from_target_files.main(
[OPTIONS.output_target_files, OPTIONS.output_img])
# Create the OTA package from the merged target files package.
if OPTIONS.output_ota:
ota_from_target_files.main(
[OPTIONS.output_target_files, OPTIONS.output_ota])
def main():
"""The main function.
Process command line arguments, then call merge_target_files to
perform the heavy lifting.
"""
common.InitLogging()
def option_handler(o, a):
if o == '--system-target-files':
logger.warning(
'--system-target-files has been renamed to --framework-target-files')
OPTIONS.framework_target_files = a
elif o == '--framework-target-files':
OPTIONS.framework_target_files = a
elif o == '--system-item-list':
logger.warning(
'--system-item-list has been renamed to --framework-item-list')
OPTIONS.framework_item_list = a
elif o == '--framework-item-list':
OPTIONS.framework_item_list = a
elif o == '--system-misc-info-keys':
logger.warning('--system-misc-info-keys has been renamed to '
'--framework-misc-info-keys')
OPTIONS.framework_misc_info_keys = a
elif o == '--framework-misc-info-keys':
OPTIONS.framework_misc_info_keys = a
elif o == '--other-target-files':
logger.warning(
'--other-target-files has been renamed to --vendor-target-files')
OPTIONS.vendor_target_files = a
elif o == '--vendor-target-files':
OPTIONS.vendor_target_files = a
elif o == '--other-item-list':
logger.warning('--other-item-list has been renamed to --vendor-item-list')
OPTIONS.vendor_item_list = a
elif o == '--vendor-item-list':
OPTIONS.vendor_item_list = a
elif o == '--output-target-files':
OPTIONS.output_target_files = a
elif o == '--output-dir':
OPTIONS.output_dir = a
elif o == '--output-item-list':
OPTIONS.output_item_list = a
elif o == '--output-ota':
OPTIONS.output_ota = a
elif o == '--output-img':
OPTIONS.output_img = a
elif o == '--output-super-empty':
OPTIONS.output_super_empty = a
elif o == '--rebuild_recovery' or o == '--rebuild-recovery':
OPTIONS.rebuild_recovery = True
elif o == '--allow-duplicate-apkapex-keys':
OPTIONS.allow_duplicate_apkapex_keys = True
elif o == '--vendor-otatools':
OPTIONS.vendor_otatools = a
elif o == '--rebuild-sepolicy':
OPTIONS.rebuild_sepolicy = True
elif o == '--keep-tmp':
OPTIONS.keep_tmp = True
elif o == '--framework-dexpreopt-config':
OPTIONS.framework_dexpreopt_config = a
elif o == '--framework-dexpreopt-tools':
OPTIONS.framework_dexpreopt_tools = a
elif o == '--vendor-dexpreopt-config':
OPTIONS.vendor_dexpreopt_config = a
else:
return False
return True
args = common.ParseOptions(
sys.argv[1:],
__doc__,
extra_long_opts=[
'system-target-files=',
'framework-target-files=',
'system-item-list=',
'framework-item-list=',
'system-misc-info-keys=',
'framework-misc-info-keys=',
'other-target-files=',
'vendor-target-files=',
'other-item-list=',
'vendor-item-list=',
'output-target-files=',
'output-dir=',
'output-item-list=',
'output-ota=',
'output-img=',
'output-super-empty=',
'framework-dexpreopt-config=',
'framework-dexpreopt-tools=',
'vendor-dexpreopt-config=',
'rebuild_recovery',
'rebuild-recovery',
'allow-duplicate-apkapex-keys',
'vendor-otatools=',
'rebuild-sepolicy',
'keep-tmp',
],
extra_option_handler=option_handler)
# pylint: disable=too-many-boolean-expressions
if (args or OPTIONS.framework_target_files is None or
OPTIONS.vendor_target_files is None or
(OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or
(OPTIONS.output_dir is not None and not OPTIONS.output_item_list) or
(OPTIONS.rebuild_recovery and not OPTIONS.rebuild_sepolicy)):
common.Usage(__doc__)
sys.exit(1)
framework_namelist = merge_utils.GetTargetFilesItems(
OPTIONS.framework_target_files)
vendor_namelist = merge_utils.GetTargetFilesItems(
OPTIONS.vendor_target_files)
if OPTIONS.framework_item_list:
OPTIONS.framework_item_list = common.LoadListFromFile(
OPTIONS.framework_item_list)
else:
OPTIONS.framework_item_list = merge_utils.InferItemList(
input_namelist=framework_namelist, framework=True)
OPTIONS.framework_partition_set = merge_utils.ItemListToPartitionSet(
OPTIONS.framework_item_list)
if OPTIONS.framework_misc_info_keys:
OPTIONS.framework_misc_info_keys = common.LoadListFromFile(
OPTIONS.framework_misc_info_keys)
else:
OPTIONS.framework_misc_info_keys = merge_utils.InferFrameworkMiscInfoKeys(
input_namelist=framework_namelist)
if OPTIONS.vendor_item_list:
OPTIONS.vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
else:
OPTIONS.vendor_item_list = merge_utils.InferItemList(
input_namelist=vendor_namelist, framework=False)
OPTIONS.vendor_partition_set = merge_utils.ItemListToPartitionSet(
OPTIONS.vendor_item_list)
if OPTIONS.output_item_list:
OPTIONS.output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
if not merge_utils.ValidateConfigLists():
sys.exit(1)
temp_dir = common.MakeTempDir(prefix='merge_target_files_')
try:
merge_target_files(temp_dir)
finally:
if OPTIONS.keep_tmp:
logger.info('Keeping temp_dir %s', temp_dir)
else:
common.Cleanup()
if __name__ == '__main__':
main()
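# --- Hedged usage sketch (editor's addition, not part of the original script) ---
# A minimal invocation using only flags handled by option_handler() above; the script
# name and file paths are illustrative.
#   merge_target_files \
#     --framework-target-files framework-target_files.zip \
#     --vendor-target-files vendor-target_files.zip \
#     --output-target-files merged-target_files.zip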
|
80f43f415854f4ec4c6d8f31a1ca0b144a512ebc
|
4d13e699f3a0466fc1ee9f41a36dd14c49bd60de
|
/rlzoo/algorithms/td3/default.py
|
8994450c45cede716205376cdfd035fb0a22e567
|
[
"Apache-2.0"
] |
permissive
|
tensorlayer/RLzoo
|
658db88dbbb59b2facc37b1dfe2020ea3a6e058c
|
e3ed8a57bd8130bd7b663f213a388ce972925f30
|
refs/heads/master
| 2023-09-04T08:29:06.899553
| 2022-12-15T16:45:01
| 2022-12-15T16:45:01
| 198,997,104
| 842
| 120
|
Apache-2.0
| 2023-03-24T22:35:03
| 2019-07-26T10:23:27
|
Python
|
UTF-8
|
Python
| false
| false
| 18,135
|
py
|
default.py
|
from rlzoo.common.policy_networks import *
from rlzoo.common.value_networks import *
from rlzoo.common.utils import set_seed
"""
full list of algorithm parameters (alg_params)
-----------------------------------------------
net_list: a list of networks (value and policy) used in the algorithm, from common functions or customization
optimizers_list: a list of optimizers for all networks and differentiable variables
replay_buffer_capacity: the size of buffer for storing explored samples
policy_target_update_interval: delayed interval for updating the target policy
-----------------------------------------------
full list of learning parameters (learn_params)
-----------------------------------------------
train_episodes: total number of episodes for training
test_episodes: total number of episodes for testing
max_steps: maximum number of steps for one episode
batch_size: update batch size
explore_steps: number of steps with random action sampling at the beginning of training
update_itr: number of repeated updates per step
reward_scale: value range of reward
save_interval: timesteps for saving the weights and plotting the results
explore_noise_scale: range of action noise for exploration
eval_noise_scale: range of action noise for evaluation of action value
mode: 'train' or 'test'
render: if true, visualize the environment
-----------------------------------------------
"""
def classic_control(env, default_seed=True):
if default_seed:
seed = 2
set_seed(seed, env) # reproducible
alg_params = dict(
replay_buffer_capacity=5e5,
policy_target_update_interval=5,
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('TD3'):
with tf.name_scope('Q_Net1'):
q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Q_Net2'):
q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net1'):
target_q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net2'):
target_q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Policy'):
policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Policy'):
target_policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [q_net1, q_net2, target_q_net1, target_q_net2, policy_net, target_policy_net]
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
q_lr, policy_lr = 3e-4, 3e-4 # q_lr: learning rate of the Q network; policy_lr: learning rate of the policy network
q_optimizer1 = tf.optimizers.Adam(q_lr)
q_optimizer2 = tf.optimizers.Adam(q_lr)
policy_optimizer = tf.optimizers.Adam(policy_lr)
optimizers_list = [q_optimizer1, q_optimizer2, policy_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=150,
batch_size=64,
explore_steps=500,
update_itr=3,
reward_scale=1.,
explore_noise_scale=1.0,
eval_noise_scale=0.5,
train_episodes=100,
test_episodes=10,
save_interval=10,
)
return alg_params, learn_params
def box2d(env, default_seed=True):
if default_seed:
seed = 2
set_seed(seed, env) # reproducible
alg_params = dict(
replay_buffer_capacity=5e5,
policy_target_update_interval=5,
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('TD3'):
with tf.name_scope('Q_Net1'):
q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Q_Net2'):
q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net1'):
target_q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net2'):
target_q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Policy'):
policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Policy'):
target_policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [q_net1, q_net2, target_q_net1, target_q_net2, policy_net, target_policy_net]
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
q_lr, policy_lr = 3e-4, 3e-4 # q_lr: learning rate of the Q network; policy_lr: learning rate of the policy network
q_optimizer1 = tf.optimizers.Adam(q_lr)
q_optimizer2 = tf.optimizers.Adam(q_lr)
policy_optimizer = tf.optimizers.Adam(policy_lr)
optimizers_list = [q_optimizer1, q_optimizer2, policy_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=150,
batch_size=64,
explore_steps=500,
update_itr=3,
reward_scale=1.,
explore_noise_scale=1.0,
eval_noise_scale=0.5,
train_episodes=100,
test_episodes=10,
save_interval=10,
)
return alg_params, learn_params
def mujoco(env, default_seed=True):
if default_seed:
seed = 2
set_seed(seed, env) # reproducible
alg_params = dict(
replay_buffer_capacity=5e5,
policy_target_update_interval=5,
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('TD3'):
with tf.name_scope('Q_Net1'):
q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Q_Net2'):
q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net1'):
target_q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net2'):
target_q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Policy'):
policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Policy'):
target_policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [q_net1, q_net2, target_q_net1, target_q_net2, policy_net, target_policy_net]
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
q_lr, policy_lr = 3e-4, 3e-4 # q_lr: learning rate of the Q network; policy_lr: learning rate of the policy network
q_optimizer1 = tf.optimizers.Adam(q_lr)
q_optimizer2 = tf.optimizers.Adam(q_lr)
policy_optimizer = tf.optimizers.Adam(policy_lr)
optimizers_list = [q_optimizer1, q_optimizer2, policy_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=150,
batch_size=64,
explore_steps=500,
update_itr=3,
reward_scale=1.,
explore_noise_scale=1.0,
eval_noise_scale=0.5,
train_episodes=100,
test_episodes=10,
save_interval=10,
)
return alg_params, learn_params
def robotics(env, default_seed=True):
if default_seed:
seed = 2
set_seed(seed, env) # reproducible
alg_params = dict(
replay_buffer_capacity=5e5,
policy_target_update_interval=5,
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('TD3'):
with tf.name_scope('Q_Net1'):
q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Q_Net2'):
q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net1'):
target_q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net2'):
target_q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Policy'):
policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Policy'):
target_policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [q_net1, q_net2, target_q_net1, target_q_net2, policy_net, target_policy_net]
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
q_lr, policy_lr = 3e-4, 3e-4 # q_lr: learning rate of the Q network; policy_lr: learning rate of the policy network
q_optimizer1 = tf.optimizers.Adam(q_lr)
q_optimizer2 = tf.optimizers.Adam(q_lr)
policy_optimizer = tf.optimizers.Adam(policy_lr)
optimizers_list = [q_optimizer1, q_optimizer2, policy_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=150,
batch_size=64,
explore_steps=500,
update_itr=3,
reward_scale=1.,
explore_noise_scale=1.0,
eval_noise_scale=0.5,
train_episodes=100,
test_episodes=10,
save_interval=10,
)
return alg_params, learn_params
def dm_control(env, default_seed=True):
if default_seed:
seed = 2
set_seed(seed, env) # reproducible
alg_params = dict(
replay_buffer_capacity=5e5,
policy_target_update_interval=5,
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('TD3'):
with tf.name_scope('Q_Net1'):
q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Q_Net2'):
q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net1'):
target_q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net2'):
target_q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Policy'):
policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Policy'):
target_policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [q_net1, q_net2, target_q_net1, target_q_net2, policy_net, target_policy_net]
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
q_lr, policy_lr = 3e-4, 3e-4 # q_lr: learning rate of the Q network; policy_lr: learning rate of the policy network
q_optimizer1 = tf.optimizers.Adam(q_lr)
q_optimizer2 = tf.optimizers.Adam(q_lr)
policy_optimizer = tf.optimizers.Adam(policy_lr)
optimizers_list = [q_optimizer1, q_optimizer2, policy_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=150,
batch_size=64,
explore_steps=500,
update_itr=3,
reward_scale=1.,
explore_noise_scale=1.0,
eval_noise_scale=0.5,
train_episodes=100,
test_episodes=10,
save_interval=10,
)
return alg_params, learn_params
def rlbench(env, default_seed=True):
if default_seed:
seed = 2
set_seed(seed, env) # reproducible
alg_params = dict(
replay_buffer_capacity=5e5,
policy_target_update_interval=5,
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('TD3'):
with tf.name_scope('Q_Net1'):
q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Q_Net2'):
q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net1'):
target_q_net1 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Q_Net2'):
target_q_net2 = QNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Policy'):
policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Target_Policy'):
target_policy_net = DeterministicPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [q_net1, q_net2, target_q_net1, target_q_net2, policy_net, target_policy_net]
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
q_lr, policy_lr = 3e-4, 3e-4 # q_lr: learning rate of the Q network; policy_lr: learning rate of the policy network
q_optimizer1 = tf.optimizers.Adam(q_lr)
q_optimizer2 = tf.optimizers.Adam(q_lr)
policy_optimizer = tf.optimizers.Adam(policy_lr)
optimizers_list = [q_optimizer1, q_optimizer2, policy_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=150,
batch_size=64,
explore_steps=500,
update_itr=3,
reward_scale=1.,
explore_noise_scale=1.0,
eval_noise_scale=0.5,
train_episodes=100,
test_episodes=10,
save_interval=10,
)
return alg_params, learn_params
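# --- Hedged usage sketch (editor's addition, not part of the original RLzoo file) ---
# The helpers above only assemble default hyper-parameter dicts; the sketch below shows
# one plausible way they are consumed. The commented-out `TD3` import, constructor call,
# and `learn()` signature are assumptions inferred from the parameter docstring at the
# top of this file, not verified RLzoo API; the gym environment id is likewise illustrative.
if __name__ == '__main__':
    import gym
    env = gym.make('Pendulum-v0')  # any continuous-control env works for TD3
    alg_params, learn_params = classic_control(env)
    print('alg_params keys:', sorted(alg_params))
    print('learn_params:', learn_params)
    # Assumed agent construction and training loop (hypothetical API):
    # from rlzoo.algorithms import TD3
    # agent = TD3(**alg_params)
    # agent.learn(env=env, mode='train', render=False, **learn_params)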
|
23fb0ad72c910cb16139b479448f958a572f43b8
|
7f620e7902c0b9ccb1fcfd1427acd5936ea33814
|
/tests/http_srv.py
|
d6103147a1ba0f476efdb1a014497bca8441321d
|
[
"Apache-2.0"
] |
permissive
|
mlrun/mlrun
|
2074c230070129ce3becb211b92c90b29a2ce850
|
b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77
|
refs/heads/development
| 2023-09-06T00:09:21.546135
| 2023-09-05T19:38:13
| 2023-09-05T19:38:13
| 205,706,595
| 1,093
| 229
|
Apache-2.0
| 2023-09-14T14:14:10
| 2019-09-01T16:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
http_srv.py
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from mlrun.runtimes.function import fake_nuclio_context
def example_function(context, event):
print(event.body)
return '{"aa": 5}'
class Handler(BaseHTTPRequestHandler):
def do_call(self):
print(f"got {self.command} request to {self.path}")
body = self.rfile.read(int(self.headers["Content-Length"]))
context, event = fake_nuclio_context(body, headers=self.headers)
resp = self.handler_function(context, event)
if isinstance(resp, str):
resp = bytes(resp.encode())
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(resp)
# self.wfile.close()
# return
def do_GET(self):
print("path:", self.path)
print("headers:", self.headers)
print("command:", self.command)
print("request:", self.request)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"Hello world")
def do_POST(self):
self.do_call()
def do_PUT(self):
self.do_call()
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
def create_function(handler, port):
def func_wrap(self, context, event):
return handler(context, event)
CustomHandler = Handler
CustomHandler.handler_function = func_wrap
server = ThreadingSimpleServer(("0.0.0.0", port), CustomHandler)
server.serve_forever()
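# Hedged usage sketch (editor's addition, not part of the original mlrun test helper):
# start the fake-nuclio HTTP server with the example handler defined above. The port is
# arbitrary; serve_forever() blocks, so tests normally launch this in a separate thread.
if __name__ == "__main__":
    create_function(example_function, 8080)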
|
eaa9910e0f6b7fc1c497c1493b4e3d4238716516
|
6c4518d19073edefa988253ab978dce804a86fd0
|
/tests/functional/python_tests/cli_wallet_extended_tests/test_transaction_managing.py
|
843cb48ab948df51b9ff2e56b47bfdf5a9873159
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
openhive-network/hive
|
9c975d7f27729424306ae46a8971a1cb50d9bade
|
faa8b1d33aead9e555b98adb78a5183634d9f8f5
|
refs/heads/master
| 2023-08-08T10:29:51.616373
| 2023-04-04T22:02:30
| 2023-04-05T16:48:29
| 248,639,972
| 348
| 112
|
NOASSERTION
| 2021-08-28T22:34:07
| 2020-03-20T01:18:32
|
C++
|
UTF-8
|
Python
| false
| false
| 2,226
|
py
|
test_transaction_managing.py
|
import datetime
import test_tools as tt
def test_transaction(wallet):
wallet.api.create_account('initminer', 'carol', '{}')
with wallet.in_single_transaction(broadcast=False) as transaction:
wallet.api.transfer_to_vesting('initminer', 'carol', tt.Asset.Test(100))
wallet.api.transfer('initminer', 'carol', tt.Asset.Test(500), 'kiwi')
wallet.api.transfer('initminer', 'carol', tt.Asset.Tbd(50), 'orange')
_result_trx_response = transaction.get_response()
_result = wallet.api.get_account('carol')
assert _result['balance'] == tt.Asset.Test(0)
assert _result['hbd_balance'] == tt.Asset.Tbd(0)
assert _result['vesting_shares'] == tt.Asset.Vest(0)
assert wallet.api.serialize_transaction(_result_trx_response) != '00000000000000000000000000'
wallet.api.sign_transaction(_result_trx_response)
_result = wallet.api.get_account('carol')
assert _result['balance'] == tt.Asset.Test(500)
assert _result['hbd_balance'] == tt.Asset.Tbd(50)
assert _result['vesting_shares'] != tt.Asset.Vest(0)
_time = datetime.datetime.utcnow()
    _before_seconds = int(_time.timestamp())
tt.logger.info('_time: {} seconds:{}...'.format(_time, _before_seconds))
response = wallet.api.transfer_to_savings('initminer', 'carol', tt.Asset.Test(0.007), 'plum')
_expiration = response['expiration']
parsed_t = tt.Time.parse(_expiration)
t_in_seconds = parsed_t.timestamp()
tt.logger.info('_time: {} seconds:{}...'.format(_expiration, t_in_seconds))
_val = t_in_seconds - _before_seconds
assert _val == 30 or _val == 31
assert wallet.api.set_transaction_expiration(678) is None
_time = datetime.datetime.utcnow()
    _before_seconds = int(_time.timestamp())
tt.logger.info('_time: {} seconds:{}...'.format(_time, _before_seconds))
response = wallet.api.transfer_to_savings('initminer', 'carol', tt.Asset.Test(0.008), 'lemon')
_expiration = response['expiration']
parsed_t = tt.Time.parse(_expiration)
t_in_seconds = parsed_t.timestamp()
tt.logger.info('_time: {} seconds:{}...'.format(_expiration, t_in_seconds))
_val = t_in_seconds - _before_seconds
assert _val == 678 or _val == 679
|
7813a6e6459a1713c2ad13ce28cb787d90569a68
|
438f82adbaa27bcb97cce0171f377ddc92586f48
|
/pulumi/infra/consul_config.py
|
e243dafe4df750fb8ce03eec6a0570f6a353c879
|
[
"Apache-2.0"
] |
permissive
|
grapl-security/grapl
|
5f93599969ec604df25712c1d16648d16de67072
|
b2c7ef263fb8134add2febb770da164ea7b4936f
|
refs/heads/main
| 2023-08-12T11:38:11.167343
| 2022-12-26T15:28:55
| 2022-12-26T15:28:55
| 151,994,099
| 386
| 60
|
Apache-2.0
| 2022-12-10T05:56:55
| 2018-10-07T23:28:27
|
Rust
|
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
consul_config.py
|
import json
import pulumi_consul as consul
import pulumi
class ConsulConfig(pulumi.ComponentResource):
"""
Consul config entries
"""
def __init__(
self,
name: str,
tracing_endpoint: str,
opts: pulumi.ResourceOptions | None = None,
) -> None:
super().__init__("grapl:ConsulConfig", name, None, opts)
        # Instead of reading an HCL file or a template, we just define the config here as
        # a plain Python dict so it's easy to pass in a URL.
config = {
"Config": [
{
"envoy_extra_static_clusters_json": json.dumps(
{
"name": "zipkin",
"type": "STRICT_DNS",
"connect_timeout": "5s",
"load_assignment": {
"cluster_name": "zipkin",
"endpoints": [
{
"lb_endpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": tracing_endpoint,
"port_value": 9411,
}
}
}
}
]
}
],
},
}
),
"envoy_stats_flush_interval": "10s",
"envoy_tracing_json": json.dumps(
{
"http": {
"name": "envoy.tracers.zipkin",
"typedConfig": {
"@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
"collector_cluster": "zipkin",
"collector_endpoint_version": "HTTP_JSON",
"collector_endpoint": "/api/v2/spans",
"shared_span_context": False,
# This 128-bit MUST be set to True for Jaeger, otherwise tracing fails silently
"trace_id_128bit": True,
},
}
}
),
"prometheus_bind_addr": "0.0.0.0:9102",
"protocol": "grpc",
}
]
}
consul.ConfigEntry(
resource_name=f"{name}-proxy-defaults",
kind="proxy-defaults",
name="global",
config_json=json.dumps(config),
opts=pulumi.ResourceOptions.merge(
opts, pulumi.ResourceOptions(parent=self)
),
)
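# --- Hedged usage sketch (editor's addition, not part of the original Grapl module) ---
# A ConsulConfig component is normally instantiated once from the Pulumi program entry
# point. The resource name and tracing endpoint below are illustrative assumptions only.
def _example_usage() -> None:
    ConsulConfig(
        "consul-config",
        tracing_endpoint="tempo.service.consul",
    )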
|
98a1453308377e8b73931542e2d010803f636edf
|
7491ceb405287660538e876317d3f69328757651
|
/aydin/it/transforms/demo/demo_fixed_pattern_real.py
|
b48c0c8d4bfda298b38572bcfb18421f37dc6828
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
royerlab/aydin
|
4d0bd5cb1a3786cf32f1d8661d3a3aa13ec7cab1
|
9312f227605be26fce960373c1f29a71323da914
|
refs/heads/master
| 2023-04-29T20:45:42.515226
| 2023-02-16T22:21:07
| 2023-02-16T22:21:07
| 188,953,977
| 125
| 14
|
BSD-3-Clause
| 2023-03-15T01:04:16
| 2019-05-28T04:30:19
|
Python
|
UTF-8
|
Python
| false
| false
| 644
|
py
|
demo_fixed_pattern_real.py
|
from aydin.io.datasets import examples_single
from aydin.it.transforms.fixedpattern import FixedPatternTransform
from aydin.util.log.log import Log
def demo_fixed_pattern_real():
Log.override_test_exclusion = True
image = examples_single.huang_fixed_pattern_noise.get_array() # [:, 0:64, 0:64]
bs = FixedPatternTransform() # axes=[1, 2])
pre_processed = bs.preprocess(image)
import napari
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(image, name='image')
viewer.add_image(pre_processed, name='pre_processed')
if __name__ == "__main__":
demo_fixed_pattern_real()
|
18c5731de15117d9c88d39794b3cd93a227c79f0
|
2dfcde05b04318ed5f4b1992ef0aa1eaa5052ccb
|
/ansible_collections/juniper/device/plugins/modules/facts.py
|
e6082ccc42621065b36d58444678704230a02883
|
[
"Apache-2.0"
] |
permissive
|
Juniper/ansible-junos-stdlib
|
87bbeb28e308f7e03bc7e8a19d1877185bb601ec
|
e3f6fca99461b8f40d4d59bb7dcc8b3892722ad5
|
refs/heads/master
| 2023-08-31T13:22:37.759400
| 2023-08-02T04:04:30
| 2023-08-02T04:04:30
| 16,183,015
| 299
| 199
|
Apache-2.0
| 2023-08-02T04:04:31
| 2014-01-23T19:28:58
|
Python
|
UTF-8
|
Python
| false
| false
| 13,969
|
py
|
facts.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020, Juniper Networks Inc. All rights reserved.
#
# License: Apache 2.0
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the Juniper Networks nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Juniper Networks, Inc. ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Juniper Networks, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community',
'status': ['stableinterface']}
DOCUMENTATION = '''
---
extends_documentation_fragment:
- juniper_junos_common.connection_documentation
- juniper_junos_common.logging_documentation
module: facts
author: "Juniper Networks - Stacy Smith (@stacywsmith)"
short_description: Retrieve facts from a Junos device
description:
- Retrieve facts from a Junos device using the
U(PyEZ fact gathering system|http://junos-pyez.readthedocs.io/en/stable/jnpr.junos.facts.html).
- Also returns the committed configuration of the Junos device if the
I(config_format) option has a value other than C(none).
options:
config_format:
description:
- The format of the configuration returned. The specified format must be
supported by the target Junos device.
required: false
default: none
choices:
- none
- xml
- set
- text
- json
savedir:
description:
- A path to a directory, on the Ansible control machine, where facts
will be stored in a JSON file.
- The resulting JSON file is saved in
I(savedir)C(/)I(hostname)C(-facts.json).
- The I(savedir) directory is the value of the I(savedir) option.
- The I(hostname)C(-facts.json) filename begins with the value of the
C(hostname) fact returned from the Junos device, which might be
different than the value of the I(host) option passed to the module.
- If the value of the I(savedir) option is C(none), the default, then
facts are NOT saved to a file.
required: false
default: none
type: path
'''
EXAMPLES = '''
---
- name: 'Explicit host argument'
hosts: junos
connection: local
gather_facts: no
collections:
- juniper.device
tasks:
- name: "Get facts"
facts:
register: response
- name: Facts with login credentials
facts:
host: "10.x.x.x"
user: "user"
passwd: "user123"
port: "22"
- name: Facts in telnet mode
facts:
host: "10.x.x.x"
user: "user"
passwd: "user123"
port: "23"
mode: "telnet"
# Print a fact
# Using config_format option
# Print the config
# Using savedir option
# Print the saved JSON file
'''
RETURN = '''
ansible_facts.junos:
description:
- Facts collected from the Junos device. This dictionary contains the
keys listed in the I(contains) section of this documentation PLUS all
of the keys returned from PyEZ's fact gathering system. See
U(PyEZ facts|http://junos-pyez.readthedocs.io/en/stable/jnpr.junos.facts.html)
for a complete list of these keys and their meaning.
returned: success
type: complex
contains:
config:
description:
- The device's committed configuration, in the format specified by
I(config_format), as a single multi-line string.
returned: when I(config_format) is not C(none).
type: str
has_2RE:
description:
- Indicates if the device has more than one Routing Engine installed.
Because Ansible does not allow keys to begin with a number, this fact
is returned in place of PyEZ's C(2RE) fact.
returned: success
type: bool
re_name:
description:
- The name of the current Routing Engine to which Ansible is connected.
returned: success
type: str
master_state:
description:
- The mastership state of the Routing Engine to which Ansible is
connected. C(true) if the RE is the master Routing Engine. C(false)
if the RE is not the master Routing Engine.
returned: success
type: bool
changed:
description:
- Indicates if the device's state has changed. Since this module does not
change the operational or configuration state of the device, the value is
always set to C(false).
returned: success
type: bool
sample: false
facts:
description:
- Returned for backwards compatibility. Returns the same keys and values
which are returned under I(ansible_facts.junos).
returned: success
type: dict
failed:
description:
- Indicates if the task failed.
returned: always
type: bool
sample: false
'''
# Standard library imports
import json
import os.path
"""From Ansible 2.1, Ansible uses Ansiballz framework for assembling modules
But custom module_utils directory is supported from Ansible 2.3
Reference for the issue: https://groups.google.com/forum/#!topic/ansible-project/J8FL7Z1J1Mw """
# Ansiballz packages module_utils into ansible.module_utils
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.juniper.device.plugins.module_utils import juniper_junos_common
from ansible.module_utils._text import to_bytes
from ansible_collections.juniper.device.plugins.module_utils import configuration as cfg
def get_facts_dict(junos_module):
"""Retreive PyEZ facts and convert to a standard dict w/o custom types.
Ansible >= 2.0 doesn't like custom objects in a modules return value.
Because PyEZ facts are a custom object rather than a true dict they must be
converted to a standard dict. Since facts are read-only, we must begin by
copying facts into a dict. Since PyEZ facts are "on-demand", the
    junos_module.dev instance must be an open PyEZ Device instance object
before this function is called.
Args:
junos_module: An instance of a JuniperJunosModule.
Returns:
A dict containing the device facts.
"""
if junos_module.conn_type == "local":
dev = junos_module.dev
# Retrieve all PyEZ-supported facts and copy to a standard dict.
facts = dict(dev.facts)
        # Add two useful facts that are implemented as PyEZ Device attributes.
facts['re_name'] = dev.re_name
facts['master_state'] = dev.master
else:
facts = junos_module.get_facts()
# Ansible doesn't allow keys starting with numbers.
# Replace the '2RE' key with the 'has_2RE' key.
if '2RE' in facts:
facts['has_2RE'] = facts['2RE']
del facts['2RE']
# The value of the 'version_info' key is a custom junos.version_info
# object. Convert this value to a dict.
if 'version_info' in facts and facts['version_info'] is not None:
facts['version_info'] = dict(facts['version_info'])
# The values of the ['junos_info'][re_name]['object'] keys are
# custom junos.version_info objects. Convert all of these to dicts.
if 'junos_info' in facts and facts['junos_info'] is not None:
for key in facts['junos_info']:
facts['junos_info'][key]['object'] = dict(
facts['junos_info'][key]['object'])
return facts
def save_facts(junos_module, facts):
"""If the savedir option was specified, save the facts into a JSON file.
If the savedir option was specified, save the facts into a JSON file named
savedir/hostname-facts.json. The filename begins with the value of the
hostname fact returned from the Junos device, which might be different than
the value of the host option passed to the module.
Args:
junos_module: An instance of a JuniperJunosModule.
facts: The facts dict returned by get_facts_dict().
Raises:
IOError: Calls junos_module.fail_json if unable to open the facts
file for writing.
"""
if junos_module.params.get('savedir') is not None:
save_dir = junos_module.params.get('savedir')
file_name = '%s-facts.json' % (facts['hostname'])
file_path = os.path.normpath(os.path.join(save_dir, file_name))
junos_module.logger.debug("Saving facts to: %s.", file_path)
try:
# TODO: Verify does this work with Python3
with open(file_path, 'w') as fact_file:
json.dump(facts, fact_file)
junos_module.logger.debug("Facts saved to: %s.", file_path)
except IOError:
junos_module.fail_json(msg="Unable to save facts. Failed to open "
"the %s file." % (file_path))
def save_inventory(junos_module, inventory):
"""If the savedir option was specified, save the XML inventory.
If the savedir option was specified, save the inventory XML output into
an XML file named savedir/hostname-inventory.xml. The filename begins with
the value of the hostname fact returned from the Junos device, which might
be different than the value of the host option passed to the module.
Args:
junos_module: An instance of a JuniperJunosModule.
inventory: The XML string of inventory to save.
Raises:
IOError: Calls junos_module.fail_json if unable to open the inventory
file for writing.
"""
    if junos_module.conn_type == "local":
dev = junos_module.dev
file_name = '%s-inventory.xml' % (dev.facts['hostname'])
else:
facts = junos_module._pyez_conn.get_facts()
file_name = '%s-inventory.xml' % (facts['hostname'])
if junos_module.params.get('savedir') is not None:
save_dir = junos_module.params.get('savedir')
file_path = os.path.normpath(os.path.join(save_dir, file_name))
junos_module.logger.debug("Saving inventory to: %s.", file_path)
try:
with open(file_path, 'wb') as fact_file:
fact_file.write(to_bytes(inventory, encoding='utf-8'))
junos_module.logger.debug("Inventory saved to: %s.", file_path)
except IOError:
junos_module.fail_json(msg="Unable to save inventory. Failed to "
"open the %s file." % (file_path))
def main():
config_format_choices = [None]
config_format_choices += juniper_junos_common.CONFIG_FORMAT_CHOICES
# Create the module instance.
junos_module = juniper_junos_common.JuniperJunosModule(
argument_spec=dict(
config_format=dict(choices=config_format_choices,
required=False,
default=None),
savedir=dict(type='path', required=False, default=None),
),
# Since this module doesn't change the device's configuration, there is
# no additional work required to support check mode. It's inherently
# supported.
supports_check_mode=True,
min_jxmlease_version=cfg.MIN_JXMLEASE_VERSION,
)
junos_module.logger.debug("Gathering facts.")
# Get the facts dictionary from the device.
facts = get_facts_dict(junos_module)
junos_module.logger.debug("Facts gathered.")
if junos_module.params.get('savedir') is not None:
# Save the facts.
save_facts(junos_module, facts)
# Get and save the inventory
try:
junos_module.logger.debug("Gathering inventory.")
if junos_module.conn_type == "local":
inventory = junos_module.dev.rpc.get_chassis_inventory()
else:
inventory = junos_module.get_chassis_inventory()
junos_module.logger.debug("Inventory gathered.")
save_inventory(junos_module,
junos_module.etree.tostring(inventory,
pretty_print=True))
except junos_module.pyez_exception.RpcError as ex:
junos_module.fail_json(msg='Unable to retrieve hardware '
'inventory: %s' % (str(ex)))
config_format = junos_module.params.get('config_format')
if config_format is not None:
(config, config_parsed) = junos_module.get_configuration(
format=config_format)
if config is not None:
facts.update({'config': config})
# Need to wait until the ordering issues are figured out before
# using config_parsed.
# if config_parsed is not None:
# facts.update({'config_parsed': config_parsed})
# Return response.
junos_module.exit_json(
changed=False,
failed=False,
ansible_facts={'junos': facts},
facts=facts)
if __name__ == '__main__':
main()
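# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# A playbook task exercising the documented config_format and savedir options; the
# values below are illustrative only.
#   - name: "Get facts, committed config in set format, and save facts locally"
#     facts:
#       config_format: "set"
#       savedir: "/tmp/junos-facts"
#     register: response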
|
3ac9631564236cfdf474b77502b2ff8596d5ac53
|
f8215144c61ef88ed63ed536334a74abc53c5631
|
/keras_nlp/layers/preprocessing/multi_segment_packer.py
|
0f61d4751a4df311e04527e98b56bdc4e6f12358
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-nlp
|
3906a35c64f543dc3713ed619eb5a790a6ff4a32
|
43cf146cb7670fc94f98ba88ed940f12d9848726
|
refs/heads/master
| 2023-08-16T05:12:06.003760
| 2023-08-15T22:51:58
| 2023-08-15T22:51:58
| 267,715,375
| 579
| 175
|
Apache-2.0
| 2023-09-14T19:33:47
| 2020-05-28T23:03:54
|
Python
|
UTF-8
|
Python
| false
| false
| 12,180
|
py
|
multi_segment_packer.py
|
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.preprocessing_layer import (
PreprocessingLayer,
)
from keras_nlp.utils.tensor_utils import assert_tf_text_installed
from keras_nlp.utils.tensor_utils import convert_to_ragged_batch
try:
import tensorflow_text as tf_text
except ImportError:
tf_text = None
@keras_nlp_export("keras_nlp.layers.MultiSegmentPacker")
class MultiSegmentPacker(PreprocessingLayer):
"""Packs multiple sequences into a single fixed width model input.
    This layer packs multiple input sequences into a single fixed width sequence
    containing start and end delimiters, forming a dense input suitable for a
    classification task for BERT and BERT-like models.
Takes as input a tuple of token segments. Each tuple element should contain
the tokens for a segment, passed as tensors, `tf.RaggedTensor`s, or lists.
For batched input, each element in the tuple of segments should be a list of
lists or a rank two tensor. For unbatched inputs, each element should be a
list or rank one tensor.
The layer will process inputs as follows:
- Truncate all input segments to fit within `sequence_length` according to
the `truncate` strategy.
- Concatenate all input segments, adding a single `start_value` at the
start of the entire sequence, and multiple `end_value`s at the end of
each segment.
- Pad the resulting sequence to `sequence_length` using `pad_tokens`.
- Calculate a separate tensor of "segment ids", with integer type and the
      same shape as the packed token output, where each integer is the index of
      the segment the token originated from. The segment id of the `start_value`
is always 0, and the segment id of each `end_value` is the segment that
precedes it.
Args:
sequence_length: int. The desired output length.
start_value: int/str/list/tuple. The id(s) or token(s) that are to be
placed at the start of each sequence (called "[CLS]" for BERT). The
dtype must match the dtype of the input tensors to the layer.
end_value: int/str/list/tuple. The id(s) or token(s) that are to be
placed at the end of the last input segment (called "[SEP]" for
BERT). The dtype must match the dtype of the input tensors to the
layer.
sep_value: int/str/list/tuple. The id(s) or token(s) that are to be
placed at the end of every segment, except the last segment (called
"[SEP]" for BERT). If `None`, `end_value` is used. The dtype must
match the dtype of the input tensors to the layer.
pad_value: int/str. The id or token that is to be placed into the unused
positions after the last segment in the sequence
(called "[PAD]" for BERT).
truncate: str. The algorithm to truncate a list of batched segments to
fit a per-example length limit. The value can be either
`"round_robin"` or `"waterfall"`:
- `"round_robin"`: Available space is assigned one token at a
time in a round-robin fashion to the inputs that still need
some, until the limit is reached.
- `"waterfall"`: The allocation of the budget is done using a
"waterfall" algorithm that allocates quota in a
left-to-right manner and fills up the buckets until we run
                out of budget. It supports an arbitrary number of segments.
Returns:
A tuple with two elements. The first is the dense, packed token
sequence. The second is an integer tensor of the same shape, containing
the segment ids.
Examples:
*Pack a single input for classification.*
>>> seq1 = [1, 2, 3, 4]
>>> packer = keras_nlp.layers.MultiSegmentPacker(
... sequence_length=8, start_value=101, end_value=102
... )
>>> token_ids, segment_ids = packer((seq1,))
>>> np.array(token_ids)
array([101, 1, 2, 3, 4, 102, 0, 0], dtype=int32)
>>> np.array(segment_ids)
array([0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
*Pack multiple inputs for classification.*
>>> seq1 = [1, 2, 3, 4]
>>> seq2 = [11, 12, 13, 14]
>>> packer = keras_nlp.layers.MultiSegmentPacker(
... sequence_length=8, start_value=101, end_value=102
... )
>>> token_ids, segment_ids = packer((seq1, seq2))
>>> np.array(token_ids)
array([101, 1, 2, 3, 102, 11, 12, 102], dtype=int32)
>>> np.array(segment_ids)
array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32)
*Pack multiple inputs for classification with different sep tokens.*
>>> seq1 = [1, 2, 3, 4]
>>> seq2 = [11, 12, 13, 14]
>>> packer = keras_nlp.layers.MultiSegmentPacker(
... sequence_length=8,
... start_value=101,
... end_value=102,
... sep_value=[102, 102],
... )
>>> token_ids, segment_ids = packer((seq1, seq2))
>>> np.array(token_ids)
array([101, 1, 2, 102, 102, 11, 12, 102], dtype=int32)
>>> np.array(segment_ids)
array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32)
Reference:
[Devlin et al., 2018](https://arxiv.org/abs/1810.04805).
"""
def __init__(
self,
sequence_length,
start_value,
end_value,
sep_value=None,
pad_value=None,
truncate="round_robin",
**kwargs,
):
assert_tf_text_installed(self.__class__.__name__)
super().__init__(**kwargs)
self.sequence_length = sequence_length
if truncate not in ("round_robin", "waterfall"):
raise ValueError(
"Only 'round_robin' and 'waterfall' algorithms are "
"supported. Received %s" % truncate
)
self.truncate = truncate
# Maintain private copies of start/end values for config purposes.
self._start_value = start_value
self._sep_value = sep_value
self._end_value = end_value
def check_special_value_type(value, value_name):
if isinstance(value, (int, str)):
return [value]
if value and not isinstance(value, (list, tuple)):
raise ValueError(
f"{value_name} should be of type int/str/list/tuple."
f"Received type: `{type(value)}`."
)
return value
start_value = check_special_value_type(start_value, "start_value")
if sep_value is None:
sep_value = end_value
sep_value = check_special_value_type(sep_value, "sep_value")
end_value = check_special_value_type(end_value, "end_value")
self.start_value = start_value
self.sep_value = sep_value
self.end_value = end_value
self.pad_value = pad_value
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"start_value": self._start_value,
"end_value": self._end_value,
"sep_value": self._sep_value,
"pad_value": self.pad_value,
"truncate": self.truncate,
}
)
return config
def _sanitize_inputs(self, inputs):
"""Force inputs to a list of rank 2 ragged tensors."""
# Sanitize inputs.
if not isinstance(inputs, (list, tuple)):
inputs = (inputs,)
if not inputs:
raise ValueError(
"At least one input is required for packing. "
f"Received: `inputs={inputs}`"
)
inputs, unbatched_list, _ = list(
zip(*(convert_to_ragged_batch(x) for x in inputs))
)
if len(set(unbatched_list)) != 1:
ranks = [1 if unbatched else 2 for unbatched in unbatched_list]
raise ValueError(
"All inputs for packing must have the same rank. "
f"Received: `inputs={inputs}` with ranks {ranks}"
)
return inputs, unbatched_list[0]
def _trim_inputs(self, inputs):
"""Trim inputs to desired length."""
num_segments = len(inputs)
num_special_tokens = (
len(self.start_value)
+ (num_segments - 1) * len(self.sep_value)
+ len(self.end_value)
)
if self.truncate == "round_robin":
return tf_text.RoundRobinTrimmer(
self.sequence_length - num_special_tokens
).trim(inputs)
elif self.truncate == "waterfall":
return tf_text.WaterfallTrimmer(
self.sequence_length - num_special_tokens
).trim(inputs)
else:
raise ValueError("Unsupported truncate: %s" % self.truncate)
def _combine_inputs(self, segments):
"""Combine inputs with start and end values added."""
dtype = segments[0].dtype
batch_size = segments[0].nrows()
start_value = tf.convert_to_tensor(self.start_value, dtype=dtype)
sep_value = tf.convert_to_tensor(self.sep_value, dtype=dtype)
end_value = tf.convert_to_tensor(self.end_value, dtype=dtype)
start_columns = tf.repeat(
start_value[tf.newaxis, :], repeats=batch_size, axis=0
)
sep_columns = tf.repeat(
sep_value[tf.newaxis, :], repeats=batch_size, axis=0
)
end_columns = tf.repeat(
end_value[tf.newaxis, :], repeats=batch_size, axis=0
)
ones_sep_columns = tf.ones_like(sep_columns, dtype="int32")
ones_end_columns = tf.ones_like(end_columns, dtype="int32")
segments_to_combine = [start_columns]
segment_ids_to_combine = [
tf.ones_like(start_columns, dtype="int32") * 0
]
for i, seg in enumerate(segments):
# Combine all segments.
segments_to_combine.append(seg)
# Combine segment ids.
segment_ids_to_combine.append(tf.ones_like(seg, dtype="int32") * i)
# Account for the sep/end tokens here.
if i == len(segments) - 1:
segments_to_combine.append(end_columns)
segment_ids_to_combine.append(ones_end_columns * i)
else:
segments_to_combine.append(sep_columns)
segment_ids_to_combine.append(ones_sep_columns * i)
token_ids = tf.concat(segments_to_combine, 1)
segment_ids = tf.concat(segment_ids_to_combine, 1)
return token_ids, segment_ids
def call(self, inputs):
inputs, unbatched = self._sanitize_inputs(inputs)
segments = self._trim_inputs(inputs)
token_ids, segment_ids = self._combine_inputs(segments)
# Pad to dense tensor output.
shape = tf.cast([-1, self.sequence_length], "int64")
token_ids = token_ids.to_tensor(
shape=shape, default_value=self.pad_value
)
segment_ids = segment_ids.to_tensor(shape=shape)
# Remove the batch dim if added.
if unbatched:
token_ids = tf.squeeze(token_ids, 0)
segment_ids = tf.squeeze(segment_ids, 0)
return (token_ids, segment_ids)
def compute_output_shape(self, inputs_shape):
if isinstance(inputs_shape[0], tuple):
inputs_shape = inputs_shape[0]
inputs_shape = list(inputs_shape)
inputs_shape[-1] = self.sequence_length
return tuple(inputs_shape)
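# --- Hedged usage sketch (editor's addition, not part of the original KerasNLP file) ---
# Contrasts the two truncation strategies described in the class docstring. With
# sequence_length=6, one start token and two end/sep tokens leave a budget of three
# content tokens; the expected outputs below are worked out by hand from the docstring
# and should be treated as an illustration, not verified library output.
if __name__ == "__main__":
    seq1, seq2 = [1, 2, 3], [11, 12, 13]
    for strategy in ("round_robin", "waterfall"):
        packer = MultiSegmentPacker(
            sequence_length=6, start_value=101, end_value=102, truncate=strategy
        )
        token_ids, segment_ids = packer((seq1, seq2))
        print(strategy, token_ids.numpy(), segment_ids.numpy())
    # Expected (hand-worked): round_robin -> tokens [101, 1, 2, 102, 11, 102]
    #                         waterfall   -> tokens [101, 1, 2, 3, 102, 102]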
|
d5592bc92dcbc5ec71a361c3e589c9cae74f0cf6
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/sms/notify.py
|
21d3ab2beb520ae1dac2876dfb56358d1e5d84c0
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,562
|
py
|
notify.py
|
"""Support for SMS notification services."""
from __future__ import annotations
import logging
import gammu
from homeassistant.components.notify import ATTR_DATA, BaseNotificationService
from homeassistant.const import CONF_TARGET
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import CONF_UNICODE, DOMAIN, GATEWAY, SMS_GATEWAY
_LOGGER = logging.getLogger(__name__)
async def async_get_service(
hass: HomeAssistant,
config: ConfigType,
discovery_info: DiscoveryInfoType | None = None,
) -> SMSNotificationService | None:
"""Get the SMS notification service."""
if discovery_info is None:
return None
return SMSNotificationService(hass)
class SMSNotificationService(BaseNotificationService):
"""Implement the notification service for SMS."""
def __init__(self, hass):
"""Initialize the service."""
self.hass = hass
async def async_send_message(self, message="", **kwargs):
"""Send SMS message."""
if SMS_GATEWAY not in self.hass.data[DOMAIN]:
_LOGGER.error("SMS gateway not found, cannot send message")
return
gateway = self.hass.data[DOMAIN][SMS_GATEWAY][GATEWAY]
targets = kwargs.get(CONF_TARGET)
if targets is None:
_LOGGER.error("No target number specified, cannot send message")
return
extended_data = kwargs.get(ATTR_DATA)
_LOGGER.debug("Extended data:%s", extended_data)
if extended_data is None:
is_unicode = True
else:
is_unicode = extended_data.get(CONF_UNICODE, True)
smsinfo = {
"Class": -1,
"Unicode": is_unicode,
"Entries": [{"ID": "ConcatenatedTextLong", "Buffer": message}],
}
try:
# Encode messages
encoded = gammu.EncodeSMS(smsinfo)
except gammu.GSMError as exc:
_LOGGER.error("Encoding message %s failed: %s", message, exc)
return
# Send messages
for encoded_message in encoded:
# Fill in numbers
encoded_message["SMSC"] = {"Location": 1}
for target in targets:
encoded_message["Number"] = target
try:
# Actually send the message
await gateway.send_sms_async(encoded_message)
except gammu.GSMError as exc:
_LOGGER.error("Sending to %s failed: %s", target, exc)
|
3efcc2beaa1a45bfef8b6bb076445fd57a58f120
|
302c3a7aa0691291a4311e4cc6620c60dc69ef04
|
/word_sprite/Game_View.py
|
0a8c9676d25ec148ed6d12314384d1e5a88f15ca
|
[] |
no_license
|
HuiDBK/WordSprite
|
6c7e105f451ef4dade185679a002b32720e7d60c
|
61fe12fd452c4e8eeef826285a0b713b7e461708
|
refs/heads/master
| 2023-08-24T19:32:50.839642
| 2023-08-05T05:13:45
| 2023-08-05T05:13:45
| 302,100,080
| 185
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,856
|
py
|
Game_View.py
|
#!/usr/zip/env python
# -*- coding:utf-8 -*-
"""
Author: Hui
Description: { Game view module }
"""
import threading
import PySimpleGUI as sg
from tkinter import colorchooser
from word_sprite import Game_Info
from word_sprite.Game_Info import GameConfig
class BaseWin(object):
"""窗口父类"""
WIN_THEME = sg.theme('DarkBlue1')
text_color = 'white'
    game_conf = GameConfig()  # game configuration object
def __init__(self, title):
self.title = title
self.window = None
self.layout = list()
def close_win(self):
"""关闭窗口"""
if self.window is not None:
self.window.close()
class GameStartWin(BaseWin):
"""游戏开始窗口"""
_voice_flag = True
def __init__(self, title):
from word_sprite.Game_Main import TypingGame # handle circular import
self.TypingGame = TypingGame
super().__init__(title)
self.__init_layout()
def __init_layout(self):
"""初始化窗口布局"""
if self._voice_flag:
voice_img = Game_Info.VOICE_ICO
else:
voice_img = Game_Info.MUTE_ICO
self.layout = [
[sg.Text(size=(70, 0)), sg.Image(filename=voice_img, key='voice_control', enable_events=True)],
[sg.Text(size=(10, 10)), sg.Text('Word Sprite', font=(u'宋体', 50)), sg.Text(size=(10, 10))],
[sg.Text(size=(23, 10)), sg.Button(u'开始游戏', font=(u'宋体', 30), key='start_game'),
sg.Text(size=(23, 10))],
[sg.Text(size=(23, 5)), sg.Button(u'游戏设置', font=(u'宋体', 30), key='game_set'), sg.Text(size=(23, 5))],
[sg.Text(size=(23, 10)), sg.Button(u'历史最高', font=(u'宋体', 30), key='show_score'),
sg.Text(size=(23, 10))],
[
sg.Text(size=(70, 0)),
sg.Image(
filename=Game_Info.GAME_ICON_48,
key='game_ico',
enable_events=True
)
]
]
def run(self):
"""启动游戏开始窗口"""
self.window = sg.Window(
title=self.title,
icon=Game_Info.GAME_ICON,
layout=self.layout
)
self.__event_handler()
def __event_handler(self):
"""窗口事件监听"""
while True:
event, value_dict = self.window.read(timeout=20)
print(event, value_dict)
            # mute control
if self._voice_flag:
self.window.find_element('voice_control').update(filename=Game_Info.VOICE_ICO)
else:
self.window.find_element('voice_control').update(filename=Game_Info.MUTE_ICO)
if event in (sg.WIN_CLOSED, 'Quit'):
break
elif event in 'voice_control':
self.voice_control()
elif event in 'game_ico':
self.author_win()
elif event in 'start_game':
print('开始游戏')
self.window.Hide()
self.TypingGame.game_over_flag = False
self.TypingGame.game_quit_flag = False
                threading.Thread(target=self.start_game).start()  # run the game in a thread so the window does not freeze
elif event in 'game_set' or self.TypingGame.game_pause_flag:
print('游戏设置')
self.window.Disable()
self.game_set()
elif event in 'show_score':
print('历史最高')
self.window.Disable()
self.show_score()
elif self.TypingGame.game_quit_flag:
self.window.UnHide()
self.window.close()
def voice_control(self):
"""游戏静音状态控制"""
if self._voice_flag:
GameStartWin._voice_flag = False
self.window.find_element('voice_control').update(filename=Game_Info.MUTE_ICO)
else:
GameStartWin._voice_flag = True
self.window.find_element('voice_control').update(filename=Game_Info.VOICE_ICO)
def author_win(self):
"""游戏开发信息窗口"""
self.window.Disable()
game_conf = Game_Info.GameConfig()
show_text = '游戏作者: \t' + game_conf.author + '\n\n' \
'游戏名称: \t' + game_conf.game_name + '\n\n' \
'游戏版本: \t' + game_conf.version + '\n\n' \
'作者邮箱: \t' + game_conf.e_mail + '\n'
sg.Popup(
show_text,
title=u'关于作者',
icon=Game_Info.GAME_ICON,
font=(u'宋体', 18),
custom_text=(u' ★ ', u' ❤ '),
button_color=('red', '#063288'),
line_width=50,
)
self.window.Enable()
def game_set(self):
"""游戏设置"""
GameSetWin(u"游戏配置", self).run()
def show_score(self):
"""查看历史最高分"""
GameScoreWin(u'历史最高', self).run()
def start_game(self):
"""开始游戏"""
self.TypingGame().start_game()
@classmethod
def voice_flag(cls):
return cls._voice_flag
class GameExecuteWin(object):
"""游戏运行窗口"""
pass
class GameEndWin(object):
"""游戏结束窗口"""
def __init__(self):
pass
class GameSetWin(BaseWin):
"""游戏配置信息窗口"""
    # game level lookup table
game_level_dict = {
1: {"game_level_num": 5, "game_level_text": u"简单", "game_level_color": "green"},
2: {"game_level_num": 15, "game_level_text": u"上手", "game_level_color": "blue"},
3: {"game_level_num": 25, "game_level_text": u"中等", "game_level_color": "orange"},
4: {"game_level_num": 35, "game_level_text": u"困难", "game_level_color": "red"},
5: {"game_level_num": 50, "game_level_text": u"魔鬼", "game_level_color": "purple"}
}
def __init__(self, title, parent_win=None):
"""初始化游戏配置界面"""
super().__init__(title)
self.parent_win = parent_win
self.word_normal_color = self.game_conf.word_normal_color
self.spell_ok_color = self.game_conf.spell_ok_color
self.__init_layout()
def __init_layout(self):
game_level_num = self.game_level_dict[int(self.game_conf.game_level)]["game_level_num"]
game_level_text = self.game_level_dict[int(self.game_conf.game_level)]["game_level_text"]
game_level_color = self.game_level_dict[int(self.game_conf.game_level)]["game_level_color"]
self.layout = [
[
sg.Text(u'游戏难度等级:', text_color=self.text_color),
sg.Slider(
range=(1, 50), default_value=game_level_num,
size=(26, 18), orientation='h', key="game_level",
enable_events=True, disable_number_display=True,
),
sg.Button(
game_level_text, key='game_level_btn',
button_color=(self.text_color, game_level_color),
),
],
[
sg.Text(u'游戏字体大小:', text_color=self.text_color),
sg.Slider(
range=(15, 35), default_value=int(self.game_conf.word_size),
size=(26, 18), enable_events=True,
orientation='h', disable_number_display=True, key="word_size"
),
sg.Text(
str(self.game_conf.word_size), text_color=self.text_color, size=(3, 1),
font=("宋体", int(self.game_conf.word_size)),
key='word_size_num'
),
],
[
sg.Text(u'游戏初始血条:', text_color=self.text_color),
sg.Slider(
range=(5, 30), default_value=int(self.game_conf.game_init_blood),
size=(26, 18), orientation='h',
enable_events=True, disable_number_display=True, key='init_blood'
),
sg.Text(
str(self.game_conf.game_init_blood), size=(3, 1),
text_color=self.text_color, key='blood_num'
)
],
[
sg.Text(u'游戏静音状态:', text_color=self.text_color),
sg.Radio(
' ', default=GameStartWin.voice_flag(), key='voice_open',
group_id=1, text_color=self.text_color, enable_events=True
),
sg.Image(filename=Game_Info.VOICE_ICO),
sg.Text(' ' * 5),
sg.Radio(
' ', default=not GameStartWin.voice_flag(), key='mute',
group_id=1, text_color=self.text_color, enable_events=True
),
sg.Image(filename=Game_Info.MUTE_ICO)
],
[
sg.Text(u'游戏字体颜色:', text_color=self.text_color),
sg.Text(
'', size=(17, 1),
background_color=self.game_conf.word_normal_color,
enable_events=True, key='word_normal_color'
),
sg.Text(
'HUI', key='word_color_test',
text_color=self.game_conf.word_normal_color,
),
sg.Button(u'颜色选择', key='normal_ccb')
],
[
sg.Text(u'单词拼写颜色:', text_color=self.text_color),
sg.Text(
'', size=(17, 1),
background_color=self.game_conf.spell_ok_color,
enable_events=True, key='spell_ok_color'
),
sg.Text(
'HUI', key='spell_color_test',
text_color=self.game_conf.spell_ok_color,
),
sg.Button(u'颜色选择', key='spell_ccb')
],
[
sg.Submit(u'暂时保存', key='temp_save', pad=((10, 350), (0, 0))),
sg.Button(u'永久保存', key='permanent')
]
]
def run(self):
"""开启游戏设置界面"""
self.window = sg.Window(
title=self.title,
icon=Game_Info.GAME_ICON,
layout=self.layout,
font=("宋体", 18),
element_padding=(10, 30),
)
        # start the event loop
self.__event_handler()
@staticmethod
def color_callback(color=None):
"""颜色按钮回调方法"""
return colorchooser.askcolor(color)
def __event_handler(self):
while True:
event, value_dict = self.window.read()
# print(event, value_dict)
if event in (None, 'Quit'):
break
elif event in ('voice_open', 'mute'):
if value_dict['voice_open']:
GameStartWin._voice_flag = True
else:
GameStartWin._voice_flag = False
elif event in 'game_level':
game_level = self.get_game_level(int(value_dict[event]))
game_level_text = self.game_level_dict[game_level]['game_level_text']
game_level_color = self.game_level_dict[game_level]['game_level_color']
self.window.find_element('game_level_btn').update(
game_level_text,
button_color=(self.text_color, game_level_color)
)
elif event in 'game_level_btn':
                # clicking the button cycles the game level
game_level = self.get_game_level(int(value_dict['game_level']))
if game_level == 5:
game_level = 0
game_level_num = self.game_level_dict[game_level + 1]['game_level_num']
game_level_text = self.game_level_dict[game_level + 1]['game_level_text']
game_level_color = self.game_level_dict[game_level + 1]['game_level_color']
self.window.find_element('game_level').update(game_level_num)
self.window.find_element('game_level_btn').update(
game_level_text,
button_color=(self.text_color, game_level_color)
)
elif event in 'word_size':
word_size_num = value_dict[event]
self.window.find_element('word_size_num').update(int(word_size_num), font=(u'宋体', int(word_size_num)))
elif event in 'init_blood':
blood_num = int(value_dict[event])
self.window.find_element('blood_num').update(str(blood_num))
elif event in 'normal_ccb':
                # word color picker
                self.window.Disable()  # disable the settings window so the user cannot open duplicate dialogs
                choose_colors = self.color_callback(self.game_conf.word_normal_color)
                self.window.Enable()  # restore the settings window
if None not in choose_colors:
self.window.find_element('word_normal_color').update(background_color=choose_colors[1])
self.window.find_element('word_color_test').update(text_color=choose_colors[1])
self.word_normal_color = choose_colors[1]
elif event in 'spell_ccb':
                # spelled-word color picker
                self.window.Disable()  # disable the settings window so the user cannot open duplicate dialogs
                choose_colors = self.color_callback(self.game_conf.spell_ok_color)
                self.window.Enable()  # restore the settings window
if None not in choose_colors:
self.window.find_element('spell_ok_color').update(background_color=choose_colors[1])
self.window.find_element('spell_color_test').update(text_color=choose_colors[1])
self.spell_ok_color = choose_colors[1]
elif event in ('temp_save', 'permanent'):
GameSetWin.SAVE_STATUS = True
game_level = self.get_game_level(int(value_dict['game_level']))
value_dict['game_level'] = game_level
value_dict['normal_ccb'] = self.word_normal_color
value_dict['spell_ccb'] = self.spell_ok_color
if event in 'temp_save':
self.temp_save(value_dict)
elif event in 'permanent':
self.permanent(value_dict)
break
self.window.close()
        # GameSetWin has no TypingGame attribute of its own; clear the pause flag
        # through the parent window's reference when one is available
        if self.parent_win is not None and hasattr(self.parent_win, 'TypingGame'):
            self.parent_win.TypingGame.game_pause_flag = False
        # re-enable the parent window
if self.parent_win is not None:
self.parent_win.window.Enable()
def temp_save(self, game_dict):
"""临时保存游戏配置信息(临时有效,重开还原)"""
"""
{
'game_level': 2,
'word_size': 26.0,
'init_blood': 20.0,
'voice_open': True,
'mute': False,
'normal_ccb': '#00ffff',
'spell_ccb': '#ff0000'
}
"""
self.game_conf.game_level = game_dict['game_level']
self.game_conf.word_size = game_dict['word_size']
self.game_conf.game_init_blood = game_dict['init_blood']
self.game_conf.word_normal_color = game_dict['normal_ccb']
self.game_conf.spell_ok_color = game_dict['spell_ccb']
def permanent(self, game_dict):
"""永久保存游戏配置信息(写入配置文件)"""
        # update the config file
self.game_conf.set_game_level(game_dict['game_level'])
self.game_conf.set_word_size(int(game_dict['word_size']))
self.game_conf.set_game_init_blood(int(game_dict['init_blood']))
self.game_conf.set_word_normal_color(game_dict['normal_ccb'])
self.game_conf.set_spell_ok_color(game_dict['spell_ccb'])
@staticmethod
def get_game_level(data):
game_level = 1
if data <= 10:
game_level = 1
elif data <= 20:
game_level = 2
elif data <= 30:
game_level = 3
elif data <= 40:
game_level = 4
elif data <= 50:
game_level = 5
        return game_level
class GameScoreWin(BaseWin):
"""游戏历史分数窗口"""
heads = [
'{:4}'.format(u'游戏等级'),
'{:4}'.format(u'最高分'),
'{:6}'.format(u'耗 时'),
'{:4}'.format(u'创建时间'),
]
levels = ['level_1', 'level_2', 'level_3', 'level_4', 'level_5']
def __init__(self, title, parent_win=None):
super().__init__(title)
self.parent_win = parent_win
self.__init_layout()
def __init_layout(self):
"""初始化窗口布局"""
        score_dict = Game_Info.game_conf.history_score_dict  # game history records
level_0, level_1 = eval(score_dict[self.levels[0]]), eval(score_dict[self.levels[1]])
level_2, level_3 = eval(score_dict[self.levels[2]]), eval(score_dict[self.levels[3]])
level_4 = eval(score_dict[self.levels[4]])
header = [[sg.Text(h, pad=(31, 30)) for h in self.heads]]
body = [
[
sg.Button(u'简单', button_color=('white', 'green')),
sg.Text('{:4}'.format(str(level_0['score']))),
sg.Text('{:6}'.format(str(level_0['use_time']))),
sg.Text('{:4}'.format(str(level_0['create_time'])))
],
[
sg.Button(u'上手', button_color=('white', 'blue')),
sg.Text('{:4}'.format(str(level_1['score']))),
sg.Text('{:6}'.format(str(level_1['use_time']))),
sg.Text('{:4}'.format(str(level_1['create_time'])))
],
[
sg.Button(u'中等', button_color=('white', 'orange')),
sg.Text('{:4}'.format(str(level_2['score']))),
sg.Text('{:6}'.format(str(level_2['use_time']))),
sg.Text('{:4}'.format(str(level_2['create_time'])))
],
[
sg.Button(u'困难', button_color=('white', 'red')),
sg.Text('{:4}'.format(str(level_3['score']))),
sg.Text('{:6}'.format(str(level_3['use_time']))),
sg.Text('{:4}'.format(str(level_3['create_time'])))
],
[
sg.Button(u'魔鬼', button_color=('white', 'purple')),
sg.Text('{:4}'.format(str(level_4['score']))),
sg.Text('{:6}'.format(str(level_4['use_time']))),
sg.Text('{:4}'.format(str(level_4['create_time'])))
]
]
self.layout = header + body
def run(self):
"""启动游戏历史分数窗口"""
self.window = sg.Window(
title=self.title,
icon=Game_Info.GAME_ICON,
layout=self.layout,
font=('宋体', 20),
element_padding=(46, 30)
)
self.__event_handler()
def __event_handler(self):
"""窗口事件监听"""
while True:
event, value_dict = self.window.read()
print(event, value_dict)
if event in (sg.WIN_CLOSED, 'Quit'):
self.parent_win.window.Enable()
break
self.window.close()
def main():
GameStartWin(title="Word Sprite").run()
if __name__ == '__main__':
main()
|
757003e1b67c39198a71426f2220dab60ae0f244
|
e90bf4b372da78ceec15282d060b48d18ba8d4e9
|
/supervisor/backups/__init__.py
|
37bbde0ccd0f1b8efe28db5f18560b118178b0a9
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/supervisor
|
67f2e1755ff5fbf7cf2084351e1c32c6995274e0
|
4838b280adafed0997f32e021274b531178386cd
|
refs/heads/main
| 2023-08-31T22:51:25.949277
| 2023-08-31T08:01:42
| 2023-08-31T08:01:42
| 84,926,758
| 928
| 477
|
Apache-2.0
| 2023-09-14T17:11:27
| 2017-03-14T08:54:15
|
Python
|
UTF-8
|
Python
| false
| false
| 29
|
py
|
__init__.py
|
"""Backup system control."""
|
b286aa2acffc41444819c7e5877f09827fddf14a
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/check-if-an-original-string-exists-given-two-encoded-strings.py
|
5633324426366ddf1d1f172af8c5ddb828b0e6ff
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 6,962
|
py
|
check-if-an-original-string-exists-given-two-encoded-strings.py
|
# Time: O(m * n * k), k is the max number of consecutive digits in s1 and s2
# Space: O(m * n * k)
# top-down dp (faster since it visits fewer states)
class Solution(object):
def possiblyEquals(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
def general_possible_numbers(s): # Time: O(2^l), Space: O(2^l), l is the length of consecutive digits, and l is at most 3
dp = [set() for _ in xrange(len(s))]
for i in xrange(len(s)):
curr, basis = 0, 1
for j in reversed(xrange(i+1)):
curr += int(s[j])*basis
basis *= 10
if s[j] == '0':
continue
if j == 0:
dp[i].add(curr)
else:
dp[i].update(x+curr for x in dp[j-1])
return dp[-1]
def optimized_possible_numbers(s):
assert(len(s) <= 3)
result = {int(s)}
if len(s) >= 2:
if s[1] != '0':
result.add(int(s[:1])+int(s[1:]))
if len(s) >= 3:
if s[2] != '0':
result.add(int(s[:2])+int(s[2:]))
if s[1] != '0':
result.add(int(s[0:1])+int(s[1:2])+int(s[2:]))
return result
def memoization(s1, s2, i, j, k, lookup):
if (i, j, k) not in lookup:
if i == len(s1) and j == len(s2):
lookup[(i, j, k)] = (k == 0)
elif i != len(s1) and s1[i].isdigit():
lookup[(i, j, k)] = False
for ni in xrange(i+1, len(s1)+1):
if ni == len(s1) or not s1[ni].isdigit():
break
for x in optimized_possible_numbers(s1[i:ni]):
if memoization(s1, s2, ni, j, k+x, lookup):
lookup[(i, j, k)] = True
break
elif j != len(s2) and s2[j].isdigit():
lookup[(i, j, k)] = False
for nj in xrange(j+1, len(s2)+1):
if nj == len(s2) or not s2[nj].isdigit():
break
for x in optimized_possible_numbers(s2[j:nj]):
if memoization(s1, s2, i, nj, k-x, lookup):
lookup[(i, j, k)] = True
break
elif k < 0:
lookup[(i, j, k)] = memoization(s1, s2, i+1, j, k+1, lookup) if i != len(s1) else False
elif k > 0:
lookup[(i, j, k)] = memoization(s1, s2, i, j+1, k-1, lookup) if j != len(s2) else False
else:
lookup[(i, j, k)] = memoization(s1, s2, i+1, j+1, k, lookup) if i != len(s1) and j != len(s2) and s1[i] == s2[j] else False
return lookup[(i, j, k)]
return memoization(s1, s2, 0, 0, 0, {})
# Time: O(m * n * k), k is the max number of consecutive digits in s1 and s2
# Space: O(m * n * k)
# top-down dp (faster since it visits fewer states)
class Solution2(object):
def possiblyEquals(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
def memoization(s1, s2, i, j, k, lookup):
if (i, j, k) not in lookup:
if i == len(s1) and j == len(s2):
lookup[(i, j, k)] = (k == 0)
elif i != len(s1) and s1[i].isdigit():
lookup[(i, j, k)] = False
for ni in xrange(i+1, len(s1)+1):
if (ni == len(s1) or s1[ni] != '0') and memoization(s1, s2, ni, j, k+int(s1[i:ni]), lookup):
lookup[(i, j, k)] = True
break
if ni == len(s1) or not s1[ni].isdigit():
break
elif j != len(s2) and s2[j].isdigit():
lookup[(i, j, k)] = False
for nj in xrange(j+1, len(s2)+1):
if (nj == len(s2) or s2[nj] != '0') and memoization(s1, s2, i, nj, k-int(s2[j:nj]), lookup):
lookup[(i, j, k)] = True
break
if nj == len(s2) or not s2[nj].isdigit():
break
elif k < 0:
lookup[(i, j, k)] = memoization(s1, s2, i+1, j, k+1, lookup) if i != len(s1) else False
elif k > 0:
lookup[(i, j, k)] = memoization(s1, s2, i, j+1, k-1, lookup) if j != len(s2) else False
else:
lookup[(i, j, k)] = memoization(s1, s2, i+1, j+1, k, lookup) if i != len(s1) and j != len(s2) and s1[i] == s2[j] else False
return lookup[(i, j, k)]
return memoization(s1, s2, 0, 0, 0, {})
# Time: O(m * n * k), k is the max number of consecutive digits in s1 and s2
# Space: O(min(m, n) * k)
# bottom-up dp
class Solution3(object):
def possiblyEquals(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
MAX_DIGIT_LEN = 3
w = 1+MAX_DIGIT_LEN
dp = [[set() for _ in xrange(len(s2)+1)] for _ in xrange(w)]
dp[0][0].add(0)
for i in xrange(len(s1)+1):
if i:
dp[(i-1)%w] = [set() for _ in xrange(len(s2)+1)]
if i != len(s1) and s1[i] == '0':
continue
for j in xrange(len(s2)+1):
for k in dp[i%w][j]:
if i != len(s1) and j != len(s2) and s1[i] == s2[j] and k == 0:
dp[(i+1)%w][j+1].add(k)
if k <= 0 and i != len(s1):
if not s1[i].isdigit():
if k:
dp[(i+1)%w][j].add(k+1)
elif s1[i] != '0':
curr = 0
for ni in xrange(i, len(s1)):
if not s1[ni].isdigit():
break
curr = curr*10 + int(s1[ni])
dp[(ni+1)%w][j].add(k+curr)
if k >= 0 and j != len(s2):
if not s2[j].isdigit():
if k:
dp[i%w][j+1].add(k-1)
elif s2[j] != '0':
curr = 0
for nj in xrange(j, len(s2)):
if not s2[nj].isdigit():
break
curr = curr*10 + int(s2[nj])
dp[i%w][nj+1].add(k-curr)
return 0 in dp[len(s1)%w][len(s2)]
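# --- Hedged usage sketch (added; not part of the original solution file) ---
# Example cases from the problem statement, run against the top-down
# solution above (Python 2 print statements, matching the xrange usage):
if __name__ == "__main__":
    sol = Solution()
    print sol.possiblyEquals("internationalization", "i18n")  # expected: True
    print sol.possiblyEquals("l123e", "44")                   # expected: True
    print sol.possiblyEquals("a5b", "c5b")                    # expected: False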
|
7249a737130fda8e9c507e46dfdfa93e7f9504cd
|
f8dee139258b7d971bd1cfa16bd16e356537bbac
|
/Contents/Libraries/Shared/json_tricks/np.py
|
676041f9f1f6736e05316cca3227a9cf7467e011
|
[
"MIT"
] |
permissive
|
pannal/Sub-Zero.bundle
|
79673016ae68d1f2e9886fd30b8763b73a8f6cf8
|
4ced7d8c8f9f5fb47d12410f87fa33d782e9f0f4
|
refs/heads/master
| 2023-07-27T23:04:32.925845
| 2023-07-09T13:07:38
| 2023-07-09T13:08:04
| 21,959,699
| 1,820
| 178
|
NOASSERTION
| 2022-11-28T03:23:13
| 2014-07-17T22:19:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
np.py
|
"""
This file exists for backward compatibility reasons.
"""
from logging import warning
from .nonp import NoNumpyException, DEFAULT_ENCODERS, DEFAULT_HOOKS, dumps, dump, loads, load # keep 'unused' imports
from .utils import hashodict, NoPandasException
from .comment import strip_comment_line_with_symbol, strip_comments # keep 'unused' imports
from .encoders import TricksEncoder, json_date_time_encode, class_instance_encode, ClassInstanceEncoder, \
numpy_encode, NumpyEncoder # keep 'unused' imports
from .decoders import DuplicateJsonKeyException, TricksPairHook, json_date_time_hook, ClassInstanceHook, \
json_complex_hook, json_set_hook, json_numpy_obj_hook # keep 'unused' imports
try:
import numpy
except ImportError:
raise NoNumpyException('Could not load numpy, maybe it is not installed? If you do not want to use numpy encoding '
'or decoding, you can import the functions from json_tricks.nonp instead, which do not need numpy.')
# todo: warning('`json_tricks.np` is deprecated, you can import directly from `json_tricks`')
DEFAULT_NP_ENCODERS = [numpy_encode,] + DEFAULT_ENCODERS # DEPRECATED
DEFAULT_NP_HOOKS = [json_numpy_obj_hook,] + DEFAULT_HOOKS # DEPRECATED
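# --- Hedged migration note (added; not part of the original file) ---
# As the deprecation note above suggests, new code can usually import from the
# package root instead of this compatibility shim, e.g.:
#   from json_tricks import dumps, loads
# which, when numpy is installed, round-trips numpy arrays through the same
# encoder/hook machinery re-exported here.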
|
c400b52afe31e0a30df07671144b778e82218d31
|
95d20c83d8aff34e314c56a3ecb2b87c9fa9fc86
|
/Ghidra/Features/Python/ghidra_scripts/python_basics.py
|
dff8d2f2b50bb4e2b64cb39d3d6486f0ae5d0458
|
[
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NationalSecurityAgency/ghidra
|
969fe0d2ca25cb8ac72f66f0f90fc7fb2dbfa68d
|
7cc135eb6bfabd166cbc23f7951dae09a7e03c39
|
refs/heads/master
| 2023-08-31T21:20:23.376055
| 2023-08-29T23:08:54
| 2023-08-29T23:08:54
| 173,228,436
| 45,212
| 6,204
|
Apache-2.0
| 2023-09-14T18:00:39
| 2019-03-01T03:27:48
|
Java
|
UTF-8
|
Python
| false
| false
| 2,385
|
py
|
python_basics.py
|
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Examples of basic Python
# @category: Examples.Python
# Python data types
my_int = 32
print my_int
print hex(my_int)
my_bool = True
print my_bool
print not my_bool
my_string = 'this is a string'
print my_string
print my_string[:4]
print my_string[-5:]
print type(my_string)
my_list = ["a", 2, 5.3, my_string]
print my_list
print my_list[1]
print my_list[1:2]
print my_list + [1, 2, 3]
print type(my_list)
my_tuple = (1, 2, 3)
print my_tuple
print my_tuple + (4,)
print type(my_tuple)
my_dictionary = {"key1": "1", "key2": 2, "key3": my_list}
print my_dictionary["key3"]
print type(my_dictionary)
my_null = None
print my_null
print type(my_null)
# Python conditionals
if len(my_string) == 16:
print "length of my_string is 16!"
if 4 not in my_list:
print "4 is not in my_list!"
if type(my_dictionary) == type({}):
print "my_dictionary is a dictionary!"
if my_null is None and 2 + 2 == 4:
print "my_null is None and 2 + 2 == 4!"
# Python loops
for i in range(1, 10):
print i
for letter in "word":
print letter
for element in [100, 200, 300]:
print element
for key in my_dictionary:
print "%s:%s" % (key, my_dictionary[key])
i = 5
while i < 8:
print i
i += 1
# Python functions
def factorial(n):
if n == 0:
return 1
return n * factorial(n-1)
i = 4
print str(i) + "! = " + str(factorial(4))
# Python exceptions
def error_function():
raise IOError("An IO error occurred!")
try:
error_function()
print "I won't print"
except IOError as e:
print e.message
# Python class
class Employee:
def __init__(self, id, name):
self.id = id
self.name = name
def getId(self):
return self.id
def getName(self):
return self.name
e = Employee(5555, "Snoopy")
print e.getName()
|
54c30e89a8e9dd463685756f170d969047c1ce6b
|
8d93a21c76a655bcb9b36cac8dd13f1e0594f9cb
|
/lib/server/AsyncCommand.py
|
1acb63804c789b338a592c885a7f0619199e59a2
|
[
"MIT"
] |
permissive
|
Phaiax/ArcticTypescript
|
8cfbe698381fa58dc90984f5e5d338b13350fe47
|
061f197e3904d129325125c004bf561651d93c39
|
refs/heads/master
| 2021-01-17T11:48:37.981158
| 2015-05-21T14:28:50
| 2015-05-21T14:28:50
| 26,186,004
| 121
| 16
| null | 2016-06-11T19:41:48
| 2014-11-04T19:54:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 7,317
|
py
|
AsyncCommand.py
|
# coding=utf8
import uuid
import time
import sublime
import json
from ..utils import Debug
from ..utils.debounce import DEFAULT_DEBOUNCE_DELAY
# ----------------------------------------- ASYNC COMMAND ---------------------------------- #
class AsyncCommand(object):
"""
    Represents a command which can be executed by the typescript services tss.js (clausreinke)
    or tsserver.js (Microsoft/Typescript).
    This class provides a chainable interface for configuration and can add itself to the
async execution queue via the append_to_***_queue*() commands.
Example for use and execution:
AsyncCommand('errors', project) \
.activate_debounce() \
.set_callback_kwargs(bar="foo", abc="def") \
.set_result_callback(rc) \
.procrastinate() \
.set_id('errors') \
.do_json_decode_tss_answer() \
.append_to_slow_queue()
The id is used to identify brother commands which do the same thing (maybe on the same file).
If an id is given, all pending commands with the same id will
be merged into one command. By default, the command will be executed, when
it's first brother is on the turn. Use procastinate() to reverse this behaviour:
Then it will be deffered until the last aka newest brother is on turn.
Id will also be used for debouncing.
"""
MERGE_PROCRASTINATE = 1
MERGE_IMMEDIATE = 2
def __init__(self, command, project):
self.command = command
self.project = project
self.id = "%s-rnd%s" % (command[:6][:-1], uuid.uuid4().hex[0:5])
self.result_callback = None
self.replaced_callback = None
self.executing_callback = None
self.callback_kwargs = {}
self.merge_behaviour = self.MERGE_IMMEDIATE
self.debounce_time = 0
self.json_decode_tss_answer = False
self.time_queue = 0
self.time_last_bounce = 0
self.time_execute = 0
self.time_finish = 0
self.is_executed = False
# ------------------------- chainable config ---------------------------------- #
def set_id(self, _id):
""" set id for merging. See AsyncCommand.__doc__ for more information about merging """
self.id = _id
return self
def procrastinate(self):
""" Set merging strategy to procastinate. See AsyncCommand.__doc__ for more information about merging """
self.merge_behaviour = self.MERGE_PROCRASTINATE
return self
def activate_debounce(self, delay=DEFAULT_DEBOUNCE_DELAY):
"""
Activates debouncing. Command will only be executed when there are no new same-id commands for <delay> seconds.
Attention: This is only tested with MERGE_PROCRASTINATE activated.
"""
self.debounce_time = delay
return self
def do_json_decode_tss_answer(self):
self.json_decode_tss_answer = True
return self
def set_callback_kwargs(self, **callback_kwargs):
""" Set additional arguments the callbacks will be called with. """
self.callback_kwargs = callback_kwargs
return self
def set_result_callback(self, result_callback=None):
""" Will be called as result_callback(tss_answer, **callback_kwargs). """
self.result_callback = result_callback
return self
def set_replaced_callback(self, replaced_callback=None):
"""
Will be called as replaced_callback(now_used_command, **callback_kwargs)
when this command has been deleted from queue without execution.
"""
self.replaced_callback = replaced_callback
return self
def set_executing_callback(self, executing_callback=None):
""" Will be called as soon as the command is sent to tss.js """
self.executing_callback = executing_callback
return self
# ------------------------- finish chain: execute ------------------------------ #
def append_to_fast_queue(self):
Debug('command', "CMD queued @FAST: %s" % self.id)
return self._append_to_queue('fast')
def append_to_slow_queue(self):
Debug('command', "CMD queued @SLOW: %s" % self.id)
return self._append_to_queue('slow')
def append_to_both_queues(self):
return self.append_to_slow_queue() \
and self.append_to_fast_queue()
def _append_to_queue(self, process_type):
if not self.project.processes.is_initialized():
return False
self.time_queue = time.time()
self.time_last_bounce = self.time_queue
if process_type == 'fast':
self.project.processes.fast.send_async_command(self)
elif process_type == 'slow':
self.project.processes.slow.send_async_command(self)
return True
# ------------------------- call callbacks ---------------------------------- #
def on_replaced(self, by):
""" calls callback by using sublime.set_timeout """
by.time_last_bounce = max(self.time_last_bounce, by.time_last_bounce)
if self.replaced_callback is not None:
            sublime.set_timeout(lambda: self.replaced_callback(by, **self.callback_kwargs), 0)
Debug('command+', "CMD replaced after %fs [ %s ]" % (time.time() - self.time_queue, self.id))
def on_result(self, tss_answer):
""" calls callback by using sublime.set_timeout """
self.is_executed = True
if self.result_callback is not None:
if self.json_decode_tss_answer:
tss_answer = json.loads(tss_answer)
            sublime.set_timeout(lambda: self.result_callback(tss_answer, **self.callback_kwargs), 0)
self.time_finish = time.time()
Debug('command', "CMD %fs = %fs + %fs to execute %s" % (
self.time_finish - self.time_queue,
self.time_execute - self.time_queue,
self.time_finish - self.time_execute,
self.id))
def on_execute(self):
""" calls executing_callback using sublime.set_timeout """
if self.executing_callback is not None:
sublime.set_timeout(lambda: self.executing_callback(**self.callback_kwargs))
# ------------------------- debouncing helpers ---------------------------------- #
def create_new_queue_trigger_command(self):
"""
Creates an AsyncCommand instance which then can be added to queue without having any effect.
"""
return AsyncCommand("!trigger!", self.project).set_id("trigger")
def is_only_a_queue_trigger_command(self):
""" Returns True if this is a command without effect. """
return self.id == "trigger"
def can_be_executed_now(self):
""" Returns False if debouncing is activated but timeout not finished. Otherwise True. """
if self.debounce_time:
return time.time() - self.time_last_bounce > self.debounce_time
else:
return True # debounce not activated
def time_until_execution(self):
""" Returns 0 or time until execution is allowed (debouncing) """
if self.debounce_time:
return self.debounce_time - (time.time() - self.time_last_bounce)
else:
return 0 # debounce not activated
|
48d889bde60f0604ca21b70fb43c9d4f055b5b89
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chromeos/ash/services/network_config/DEPS
|
fed9ee87e253db1027346329711a82d7dcf36b85
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 299
|
DEPS
|
include_rules = [
"+components/captive_portal",
"+components/onc",
"+components/user_manager",
]
specific_include_rules = {
"cros_network_config_unittest.cc": [
"+components/proxy_config",
"+components/sync_preferences/testing_pref_service_syncable.h",
"+third_party/re2"
],
}
|
|
3cd0249d5fc26f967e9844e53b78a4b6f6d6a397
|
6958f617af0c5a76304ceb1006c77bc70ca0e195
|
/tests/python/test_global_store_grad.py
|
6f98e9f016759fd0b274db171dc8f3604a15773d
|
[
"Apache-2.0"
] |
permissive
|
taichi-dev/taichi
|
3fae315a494f1c97392d5b931c939abbbfba1bdc
|
b30b511f55e3d0ebff765ee048d0aaa4ba9e7667
|
refs/heads/master
| 2023-09-02T13:28:18.208792
| 2023-08-23T23:22:43
| 2023-08-23T23:22:43
| 74,660,642
| 17,231
| 1,841
|
Apache-2.0
| 2023-09-14T11:29:32
| 2016-11-24T10:00:05
|
C++
|
UTF-8
|
Python
| false
| false
| 547
|
py
|
test_global_store_grad.py
|
"""
import taichi as ti
ti.lang.impl.current_cfg().print_ir = True
def test_global_store_branching():
# ti.reset()
N = 16
x = ti.field(ti.f32)
y = ti.field(ti.f32)
ti.root.dense(ti.i, N).place(x)
ti.root.dense(ti.i, N).place(y)
ti.root.lazy_grad()
@ti.kernel
def oldeven():
for i in range(N):
if i % 2 == 0:
x[i] = y[i]
for i in range(N):
x.grad[i] = 1
oldeven()
oldeven.grad()
for i in range(N):
assert y.grad[i] == (i % 2 == 0)
"""
|
6afb61db60b8fd9dcbe86228a9ada4ee188ef15a
|
8de79ab1818c535dcd8ad6e0c92b5c9642ffb82a
|
/sphinx/deprecation.py
|
8a242d7da47f1858aa5191d73de29bfec34d78b6
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
sphinx-doc/sphinx
|
632d75bfc7bef14904f3d847e6de6d37594a13a5
|
eab54533a56119c5badd5aac647c595a9adae720
|
refs/heads/master
| 2023-08-16T18:21:54.073511
| 2023-08-15T17:36:47
| 2023-08-15T17:36:47
| 28,710,753
| 6,138
| 2,587
|
NOASSERTION
| 2023-09-14T14:22:28
| 2015-01-02T10:53:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
deprecation.py
|
"""Sphinx deprecation classes and utilities."""
from __future__ import annotations
import warnings
class RemovedInSphinx80Warning(DeprecationWarning):
pass
class RemovedInSphinx90Warning(PendingDeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInSphinx80Warning
def _deprecation_warning(
module: str,
attribute: str,
canonical_name: str,
*,
remove: tuple[int, int],
) -> None:
"""Helper function for module-level deprecations using __getattr__
Exemplar usage:
.. code:: python
# deprecated name -> (object to return, canonical path or empty string)
_DEPRECATED_OBJECTS = {
'deprecated_name': (object_to_return, 'fully_qualified_replacement_name', (8, 0)),
}
def __getattr__(name):
if name not in _DEPRECATED_OBJECTS:
msg = f'module {__name__!r} has no attribute {name!r}'
raise AttributeError(msg)
from sphinx.deprecation import _deprecation_warning
deprecated_object, canonical_name, remove = _DEPRECATED_OBJECTS[name]
_deprecation_warning(__name__, name, canonical_name, remove=remove)
return deprecated_object
"""
if remove == (8, 0):
warning_class: type[Warning] = RemovedInSphinx80Warning
elif remove == (9, 0):
warning_class = RemovedInSphinx90Warning
else:
msg = f'removal version {remove!r} is invalid!'
raise RuntimeError(msg)
qualified_name = f'{module}.{attribute}'
if canonical_name:
message = (f'The alias {qualified_name!r} is deprecated, '
f'use {canonical_name!r} instead.')
else:
message = f'{qualified_name!r} is deprecated.'
warnings.warn(message + " Check CHANGES for Sphinx API modifications.",
warning_class, stacklevel=3)
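# --- Hedged illustration (added; not part of the original module) ---
# With the helper above, a module-level __getattr__ deprecation such as
# (module and attribute names are hypothetical):
#   _deprecation_warning('sphinx.util', 'old_helper', 'sphinx.util.new_helper', remove=(8, 0))
# emits a RemovedInSphinx80Warning whose message reads:
#   "The alias 'sphinx.util.old_helper' is deprecated, use 'sphinx.util.new_helper'
#    instead. Check CHANGES for Sphinx API modifications."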
|
8ce6f78b0560e22cff9fcf38cc11367eb1af97b3
|
d7363da78e6f1e8ae2c6abca3f845853756165d4
|
/src/adafruit_blinka/board/nvidia/jetson_orin_nx.py
|
1aa27d37a186f8dca72b6a1374188d8a9f7efdb6
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Blinka
|
7a9ed88f39ff12082d1b46647fa8869b541fba49
|
009b352a3234339000c32d2e61e830455cf389fa
|
refs/heads/main
| 2023-08-09T06:25:02.178935
| 2023-07-28T16:45:40
| 2023-07-28T16:45:40
| 120,540,744
| 398
| 331
|
MIT
| 2023-09-14T20:32:23
| 2018-02-07T00:25:03
|
Python
|
UTF-8
|
Python
| false
| false
| 699
|
py
|
jetson_orin_nx.py
|
# SPDX-FileCopyrightText: 2022 Linh Hoang for NVIDIA
# SPDX-FileCopyrightText: 2022 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Pin definitions for Jetson AGX Orin."""
from adafruit_blinka.microcontroller.tegra.t234 import pin
SDA = pin.SDA
SCL = pin.SCL
SDA_1 = pin.SDA_1
SCL_1 = pin.SCL_1
D4 = pin.AC06
D5 = pin.Q05
D6 = pin.Q06
D7 = pin.Z07
D8 = pin.Z06
D9 = pin.Z04
D10 = pin.Z05
D11 = pin.Z03
D12 = pin.G06
D13 = pin.H00
D16 = pin.R05
D17 = pin.R04
D18 = pin.H07
D19 = pin.I02
D20 = pin.I01
D21 = pin.I00
D22 = pin.N01
D23 = pin.Y04
D24 = pin.Y03
D25 = pin.Y01
D26 = pin.Y02
D27 = pin.Y00
CE1 = D7
CE0 = D8
MISO = D9
MOSI = D10
SCLK = D11
SCK = D11
|
e15b56bed35837bea91cc428281ad6968fe0df02
|
6df06b8581a29e93f8d375211ec6ac2626839592
|
/rally/common/db/migrations/versions/2017_06_35fe16d4ab1c_update_tasks_based_on_workloads.py
|
a6bb7efd189411671ed42571d8f4d6b6c1646b79
|
[
"Apache-2.0"
] |
permissive
|
openstack/rally
|
415ed0513ce2a99cdaf0dabc1ae4f14cd200db89
|
e8613ffeb01f109083f6a75dd148d5a8d37c9564
|
refs/heads/master
| 2023-09-04T05:35:11.862008
| 2023-05-19T21:31:59
| 2023-05-23T08:09:06
| 12,645,326
| 278
| 291
|
Apache-2.0
| 2023-04-22T02:34:29
| 2013-09-06T13:58:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,450
|
py
|
2017_06_35fe16d4ab1c_update_tasks_based_on_workloads.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""update-tasks-based-on-workloads
Update "pass_sla" and "duration" fields of tasks and subtasks based on
workloads.
Revision ID: 35fe16d4ab1c
Revises: 92aaaa2a6bb3
Create Date: 2017-06-07 19:50:03.572493
"""
from alembic import op
import sqlalchemy as sa
from rally.common.db import sa_types
from rally import exceptions
# revision identifiers, used by Alembic.
revision = "35fe16d4ab1c"
down_revision = "92aaaa2a6bb3"
branch_labels = None
depends_on = None
task_helper = sa.Table(
"tasks",
sa.MetaData(),
sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("uuid", sa.String(36), nullable=False),
sa.Column("task_duration", sa.Float()),
sa.Column("pass_sla", sa.Boolean())
)
subtask_helper = sa.Table(
"subtasks",
sa.MetaData(),
sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("uuid", sa.String(36), nullable=False),
sa.Column("duration", sa.Float()),
sa.Column("pass_sla", sa.Boolean())
)
workload_helper = sa.Table(
"workloads",
sa.MetaData(),
sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("uuid", sa.String(36), nullable=False),
sa.Column("task_uuid", sa.String(length=36), nullable=False),
sa.Column("subtask_uuid", sa.String(length=36), nullable=False),
sa.Column("load_duration", sa.Float()),
sa.Column("pass_sla", sa.Boolean()),
)
def upgrade():
tasks = {}
subtasks = {}
with op.batch_alter_table("workloads") as batch_op:
# change type of column
batch_op.drop_column("start_time")
batch_op.add_column(sa.Column("start_time", sa_types.TimeStamp))
connection = op.get_bind()
for w in connection.execute(workload_helper.select()):
tasks.setdefault(w.task_uuid, {"task_duration": 0, "pass_sla": True})
subtasks.setdefault(w.subtask_uuid, {"duration": 0, "pass_sla": True})
tasks[w.task_uuid]["task_duration"] += w.load_duration
subtasks[w.subtask_uuid]["duration"] += w.load_duration
if not w.pass_sla:
tasks[w.task_uuid]["pass_sla"] = False
subtasks[w.subtask_uuid]["pass_sla"] = False
for subtask in connection.execute(subtask_helper.select()):
values = subtasks.get(subtask.uuid, {"duration": 0.0,
"pass_sla": True})
connection.execute(subtask_helper.update().where(
subtask_helper.c.id == subtask.id).values(**values))
for task in connection.execute(task_helper.select()):
values = tasks.get(task.uuid, {"task_duration": 0.0,
"pass_sla": True})
connection.execute(task_helper.update().where(
task_helper.c.id == task.id).values(**values))
def downgrade():
raise exceptions.DowngradeNotSupported()
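# --- Hedged worked example (added; not part of the original migration) ---
# upgrade() above sums workload load_duration per task/subtask and ANDs the
# pass_sla flags. For instance, two workloads under one task with
# load_duration 3.0 and 2.0, where only the second passed its SLA, yield
# task_duration == 5.0 and pass_sla == False for that task (and the same
# aggregation applies to the owning subtasks).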
|
a6a959112c3cddaad1273e161619e82b43e5d979
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/interfaces/mixins/reporting.py
|
90ca80461869e525de9b2360a8cb394402e16b5e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
reporting.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" class mixin and utilities for enabling reports for nipype interfaces """
import os
from abc import abstractmethod
from ... import logging
from ..base import File, BaseInterface, BaseInterfaceInputSpec, TraitedSpec
iflogger = logging.getLogger("nipype.interface")
class ReportCapableInputSpec(BaseInterfaceInputSpec):
out_report = File(
"report",
usedefault=True,
hash_files=False,
desc="filename for the visual report",
)
class ReportCapableOutputSpec(TraitedSpec):
out_report = File(desc="filename for the visual report")
class ReportCapableInterface(BaseInterface):
"""Mixin to enable reporting for Nipype interfaces"""
_out_report = None
def __init__(self, generate_report=False, **kwargs):
super().__init__(**kwargs)
self.generate_report = generate_report
def _post_run_hook(self, runtime):
runtime = super()._post_run_hook(runtime)
# leave early if there's nothing to do
if not self.generate_report:
return runtime
self._out_report = self.inputs.out_report
if not os.path.isabs(self._out_report):
self._out_report = os.path.abspath(
os.path.join(runtime.cwd, self._out_report)
)
self._generate_report()
return runtime
def _list_outputs(self):
try:
outputs = super()._list_outputs()
except NotImplementedError:
outputs = {}
if self._out_report is not None:
outputs["out_report"] = self._out_report
return outputs
@abstractmethod
def _generate_report(self):
"""
Saves report to file identified by _out_report instance variable
"""
raise NotImplementedError
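# --- Hedged sketch of a concrete subclass (added; not part of the module) ---
# A minimal report-capable interface might look like the following; the class
# name and the HTML body are hypothetical, only the hook names come from the
# mixin above:
#
#   class MyReportingInterface(ReportCapableInterface):
#       input_spec = ReportCapableInputSpec
#       output_spec = ReportCapableOutputSpec
#
#       def _run_interface(self, runtime):
#           return runtime
#
#       def _generate_report(self):
#           # write the report to the absolute path stored in self._out_report
#           with open(self._out_report, "w") as fobj:
#               fobj.write("<html><body>minimal report</body></html>")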
|
1d5b6f516af2d752c8b5ae21058433ed8ee56606
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/interfaces/minc/tests/test_auto_Extract.py
|
6c34b443f0bdc07d2f77d3c67b8fbbf095cafce8
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,185
|
py
|
test_auto_Extract.py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..minc import Extract
def test_Extract_inputs():
input_map = dict(
args=dict(
argstr="%s",
),
count=dict(
argstr="-count %s",
sep=",",
),
environ=dict(
nohash=True,
usedefault=True,
),
flip_any_direction=dict(
argstr="-any_direction",
xor=(
"flip_positive_direction",
"flip_negative_direction",
"flip_any_direction",
),
),
flip_negative_direction=dict(
argstr="-negative_direction",
xor=(
"flip_positive_direction",
"flip_negative_direction",
"flip_any_direction",
),
),
flip_positive_direction=dict(
argstr="-positive_direction",
xor=(
"flip_positive_direction",
"flip_negative_direction",
"flip_any_direction",
),
),
flip_x_any=dict(
argstr="-xanydirection",
xor=("flip_x_positive", "flip_x_negative", "flip_x_any"),
),
flip_x_negative=dict(
argstr="-xdirection",
xor=("flip_x_positive", "flip_x_negative", "flip_x_any"),
),
flip_x_positive=dict(
argstr="+xdirection",
xor=("flip_x_positive", "flip_x_negative", "flip_x_any"),
),
flip_y_any=dict(
argstr="-yanydirection",
xor=("flip_y_positive", "flip_y_negative", "flip_y_any"),
),
flip_y_negative=dict(
argstr="-ydirection",
xor=("flip_y_positive", "flip_y_negative", "flip_y_any"),
),
flip_y_positive=dict(
argstr="+ydirection",
xor=("flip_y_positive", "flip_y_negative", "flip_y_any"),
),
flip_z_any=dict(
argstr="-zanydirection",
xor=("flip_z_positive", "flip_z_negative", "flip_z_any"),
),
flip_z_negative=dict(
argstr="-zdirection",
xor=("flip_z_positive", "flip_z_negative", "flip_z_any"),
),
flip_z_positive=dict(
argstr="+zdirection",
xor=("flip_z_positive", "flip_z_negative", "flip_z_any"),
),
image_maximum=dict(
argstr="-image_maximum %s",
),
image_minimum=dict(
argstr="-image_minimum %s",
),
image_range=dict(
argstr="-image_range %s %s",
),
input_file=dict(
argstr="%s",
extensions=None,
mandatory=True,
position=-2,
),
nonormalize=dict(
argstr="-nonormalize",
xor=("normalize", "nonormalize"),
),
normalize=dict(
argstr="-normalize",
xor=("normalize", "nonormalize"),
),
out_file=dict(
argstr="> %s",
extensions=None,
genfile=True,
position=-1,
),
output_file=dict(
extensions=None,
hash_files=False,
keep_extension=False,
name_source=["input_file"],
name_template="%s.raw",
position=-1,
),
start=dict(
argstr="-start %s",
sep=",",
),
write_ascii=dict(
argstr="-ascii",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_byte=dict(
argstr="-byte",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_double=dict(
argstr="-double",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_float=dict(
argstr="-float",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_int=dict(
argstr="-int",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_long=dict(
argstr="-long",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_range=dict(
argstr="-range %s %s",
),
write_short=dict(
argstr="-short",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_signed=dict(
argstr="-signed",
xor=("write_signed", "write_unsigned"),
),
write_unsigned=dict(
argstr="-unsigned",
xor=("write_signed", "write_unsigned"),
),
)
inputs = Extract.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Extract_outputs():
output_map = dict(
output_file=dict(
extensions=None,
),
)
outputs = Extract.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
562ca4624cd1779a922bff15dcd1b0831ae34dd6
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/MetroX_CircuitPython/mib_potentiometer_threshold/code.py
|
01948ac0405df87440eb4a972e3debf56e29ba08
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 519
|
py
|
code.py
|
# SPDX-FileCopyrightText: 2021 Brent Rubell for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
'mib_potentiometer_THRESHOLD.py'.
=================================================
turns on a LED when the potentiometer is above a half-turn
"""
import analogio
import board
import digitalio
LED = digitalio.DigitalInOut(board.D13)
LED.switch_to_output()
POT = analogio.AnalogIn(board.A0)
THRESHOLD = 32768  # half of the 16-bit AnalogIn range (0-65535), i.e. roughly a half-turn
while True:
if POT.value > THRESHOLD:
LED.value = True
else:
LED.value = False
|
3e9aaf106ffa3b4e6b8342ed0aa01ad2093a8e91
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/tests/unit/lib/utils/test_git_repo.py
|
67760e435a69ecfee43d7f40f2315720f0f6afdd
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 13,535
|
py
|
test_git_repo.py
|
import subprocess
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch, MagicMock, ANY, call
import os
from samcli.lib.utils.git_repo import GitRepo, rmtree_callback, CloneRepoException, CloneRepoUnstableStateException
REPO_URL = "REPO URL"
REPO_NAME = "REPO NAME"
CLONE_DIR = os.path.normpath("/tmp/local/clone/dir")
EXPECTED_DEFAULT_CLONE_PATH = os.path.normpath(os.path.join(CLONE_DIR, REPO_NAME))
COMMIT = "123"
class TestGitRepo(TestCase):
def setUp(self):
self.repo = GitRepo(url=REPO_URL)
self.local_clone_dir = MagicMock()
self.local_clone_dir.joinpath.side_effect = lambda sub_dir: os.path.normpath(os.path.join(CLONE_DIR, sub_dir))
def test_ensure_clone_directory_exists(self):
self.repo._ensure_clone_directory_exists(self.local_clone_dir) # No exception is thrown
self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True)
def test_ensure_clone_directory_exists_fail(self):
self.local_clone_dir.mkdir.side_effect = OSError
with self.assertRaises(OSError):
self.repo._ensure_clone_directory_exists(self.local_clone_dir)
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_git_executable_not_windows(self, mock_platform, mock_popen):
mock_platform.return_value = "Not Windows"
executable = self.repo.git_executable()
self.assertEqual(executable, "git")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_git_executable_windows(self, mock_platform, mock_popen):
mock_platform.return_value = "Windows"
executable = self.repo.git_executable()
self.assertEqual(executable, "git")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
def test_git_executable_fails(self, mock_popen):
mock_popen.side_effect = OSError("fail")
with self.assertRaises(OSError):
self.repo.git_executable()
@patch("samcli.lib.utils.git_repo.Path.exists")
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_happy_case(self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock):
platform_mock.return_value = "Not Windows"
path_exist_mock.return_value = False
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME)
self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True)
popen_mock.assert_called_once_with(["git"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
check_output_mock.assert_has_calls(
[call(["git", "clone", self.repo.url, REPO_NAME], cwd=ANY, stderr=subprocess.STDOUT)]
)
shutil_mock.rmtree.assert_not_called()
shutil_mock.copytree.assert_called_with(ANY, EXPECTED_DEFAULT_CLONE_PATH)
@patch("samcli.lib.utils.git_repo.Path.exists")
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_create_new_local_repo(
self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock
):
path_exist_mock.return_value = False
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME)
shutil_mock.rmtree.assert_not_called()
shutil_mock.copytree.assert_called_with(ANY, EXPECTED_DEFAULT_CLONE_PATH)
@patch("samcli.lib.utils.git_repo.Path.exists")
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_replace_current_local_repo_if_replace_existing_flag_is_set(
self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock
):
path_exist_mock.return_value = True
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME, replace_existing=True)
self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True)
shutil_mock.rmtree.assert_called_with(EXPECTED_DEFAULT_CLONE_PATH, onerror=rmtree_callback)
shutil_mock.copytree.assert_called_with(ANY, EXPECTED_DEFAULT_CLONE_PATH)
@patch("samcli.lib.utils.git_repo.Path.exists")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_fail_if_current_local_repo_exists_and_replace_existing_flag_is_not_set(
self, platform_mock, popen_mock, check_output_mock, path_exist_mock
):
path_exist_mock.return_value = True
with self.assertRaises(CloneRepoException):
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME) # replace_existing=False by default
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_attempt_is_set_to_true_after_clone(self, platform_mock, popen_mock, check_output_mock, shutil_mock):
self.assertFalse(self.repo.clone_attempted)
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME)
self.assertTrue(self.repo.clone_attempted)
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_attempt_is_set_to_true_even_if_clone_failed(
self, platform_mock, popen_mock, check_output_mock, shutil_mock
):
check_output_mock.side_effect = subprocess.CalledProcessError("fail", "fail", "not found".encode("utf-8"))
self.assertFalse(self.repo.clone_attempted)
try:
with self.assertRaises(CloneRepoException):
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME)
except:
pass
self.assertTrue(self.repo.clone_attempted)
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_failed_to_create_the_clone_directory(
self, platform_mock, popen_mock, check_output_mock, shutil_mock
):
self.local_clone_dir.mkdir.side_effect = OSError
try:
with self.assertRaises(OSError):
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME)
except:
pass
self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True)
popen_mock.assert_not_called()
check_output_mock.assert_not_called()
shutil_mock.assert_not_called()
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_when_the_subprocess_fail(self, platform_mock, popen_mock, check_output_mock, shutil_mock):
check_output_mock.side_effect = subprocess.CalledProcessError("fail", "fail", "any reason".encode("utf-8"))
with self.assertRaises(CloneRepoException):
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME)
@patch("samcli.lib.utils.git_repo.LOG")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_when_the_git_repo_not_found(self, platform_mock, popen_mock, check_output_mock, log_mock):
check_output_mock.side_effect = subprocess.CalledProcessError("fail", "fail", "not found".encode("utf-8"))
try:
with self.assertRaises(CloneRepoException):
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME)
except Exception:
pass
log_mock.warning.assert_called()
@patch("samcli.lib.utils.git_repo.Path.exists")
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_when_failed_to_move_cloned_repo_from_temp_to_final_destination(
self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock
):
path_exist_mock.return_value = True
shutil_mock.copytree.side_effect = OSError
try:
with self.assertRaises(CloneRepoUnstableStateException):
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME, replace_existing=True)
except Exception:
pass
shutil_mock.rmtree.assert_called_once_with(EXPECTED_DEFAULT_CLONE_PATH, onerror=rmtree_callback)
shutil_mock.copytree.assert_called_once_with(ANY, EXPECTED_DEFAULT_CLONE_PATH)
@patch("samcli.lib.utils.git_repo.LOG")
@patch("samcli.lib.utils.git_repo.check_output")
def test_checkout_commit_when_commit_not_exist(self, check_output_mock, log_mock):
check_output_mock.side_effect = subprocess.CalledProcessError(
"fail", "fail", "fatal: reference is not a tree".encode("utf-8")
)
try:
with self.assertRaises(CloneRepoException):
self.repo._checkout_commit(repo_dir="test", commit="1234")
except Exception:
pass
log_mock.warning.assert_called()
@patch("samcli.lib.utils.git_repo.Path.exists")
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_with_commit(self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock):
platform_mock.return_value = "Not Windows"
path_exist_mock.return_value = False
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME, commit=COMMIT)
self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True)
popen_mock.assert_has_calls(
[call(["git"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)],
[call(["git"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)],
)
check_output_mock.assert_has_calls(
[call(["git", "clone", self.repo.url, REPO_NAME], cwd=ANY, stderr=subprocess.STDOUT)],
[call(["git", "checkout", COMMIT], cwd=ANY, stderr=subprocess.STDOUT)],
)
shutil_mock.rmtree.assert_not_called()
shutil_mock.copytree.assert_called_with(ANY, EXPECTED_DEFAULT_CLONE_PATH)
@patch("samcli.lib.utils.git_repo.Path.exists")
@patch("samcli.lib.utils.git_repo.shutil")
@patch("samcli.lib.utils.git_repo.check_output")
@patch("samcli.lib.utils.git_repo.subprocess.Popen")
@patch("samcli.lib.utils.git_repo.platform.system")
def test_clone_with_longpaths_configured_in_windows(
self, platform_mock, popen_mock, check_output_mock, shutil_mock, path_exist_mock
):
platform_mock.return_value = "windows"
path_exist_mock.return_value = False
self.repo.clone(clone_dir=self.local_clone_dir, clone_name=REPO_NAME)
self.local_clone_dir.mkdir.assert_called_once_with(mode=0o700, parents=True, exist_ok=True)
popen_mock.assert_called_once_with(["git"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
check_output_mock.assert_has_calls(
[
call(
["git", "clone", self.repo.url, REPO_NAME, "--config", "core.longpaths=true"],
cwd=ANY,
stderr=subprocess.STDOUT,
)
]
)
shutil_mock.rmtree.assert_not_called()
shutil_mock.copytree.assert_called_with(ANY, EXPECTED_DEFAULT_CLONE_PATH)
@patch("samcli.lib.utils.git_repo.Path")
@patch("samcli.lib.utils.git_repo.platform.system")
@patch("samcli.lib.utils.git_repo.os.path.normpath")
def test_clone_without_windows_longpath_exception_message(self, normpath_mock, platform_mock, path_exist_mock):
path_exist_mock.side_effect = OSError()
platform_mock.return_value = "windows"
with self.assertRaises(CloneRepoUnstableStateException) as ex:
GitRepo._persist_local_repo(MagicMock(), MagicMock(), MagicMock(), MagicMock())
expected_msg = (
"Failed to modify a local file when cloning app templates. "
"MAX_PATH should be enabled in the Windows registry."
"\nFor more details on how to enable MAX_PATH for Windows, please visit: "
"https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/install-sam-cli.html"
)
self.assertEqual(str(ex.exception), expected_msg)
|
bc4e74566cb3246c09051f2d2498e10fdfd6ec20
|
ecca79bf1491492befcccf5af27c1653d4c34685
|
/grappa/runner.py
|
175d2b9458c106207ab659134599985e453bc2f9
|
[
"MIT"
] |
permissive
|
grappa-py/grappa
|
d1545e9c9cbc161b3f7f068962b6c78a15707320
|
f1861e1572e68f031977e86a5d9eba1957bd164e
|
refs/heads/master
| 2021-06-01T10:31:17.896919
| 2020-11-23T18:18:27
| 2020-11-23T18:18:27
| 81,199,115
| 143
| 17
|
MIT
| 2020-11-23T23:01:27
| 2017-02-07T11:04:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
runner.py
|
# -*- coding: utf-8 -*-
from .reporter import ErrorReporter
class Runner(object):
"""
    Runner is responsible for triggering the registered assertion operators in
    the current engine.
Arguments:
engine (grappa.Engine)
"""
def __init__(self, engine):
self.engine = engine
def render_error(self, ctx, error):
# Expose keywords via context (this should be improved)
ctx.keywords = self.engine.keywords
# Render error exception
return ErrorReporter(ctx).run(error)
def run_assertions(self, ctx):
# Trigger assertion functions
for assertion in self.engine.assertions:
# Store current subject
subject = ctx.subject
# Run assertion with the given subject
result = assertion(ctx.subject)
# Check if the subject changed during operator execution
if subject is not ctx.subject:
# Register previous subject
ctx.subjects.append(subject)
# If assertion passed, just continue with it
if result is True:
continue
# Forward original grappa error
if all([isinstance(result, AssertionError),
hasattr(result, '__grappa__')]):
return result
# Otherwise render assertion error accordingly
return self.render_error(ctx, result)
def run(self, ctx):
"""
Runs the current phase.
"""
# Reverse engine assertion if needed
if ctx.reverse:
self.engine.reverse()
if self.engine.empty:
raise AssertionError('grappa: no assertions to run')
try:
# Run assertion in series and return error, if present
return self.run_assertions(ctx)
except Exception as _err:
            # Handle legit grappa internal errors
if getattr(_err, '__legit__', False):
raise _err
# Otherwise render it
return self.render_error(ctx, _err)
|
36338995fe8778cf2247fbc2fb07b0c8205a4dd2
|
1acb41c1157c8eb5c3988f24baa72bdbf697931c
|
/easyfsl/datasets/__init__.py
|
4b78324270f2b0842f498ff5434960f5da74a6ea
|
[
"MIT"
] |
permissive
|
sicara/easy-few-shot-learning
|
f2050a7dcacc9fcc1de7502f03fd3491cc175f69
|
509dd9aa60879ad76b687002dc5648c9b4f337b8
|
refs/heads/master
| 2023-09-01T19:24:10.217392
| 2023-09-01T08:59:43
| 2023-09-01T08:59:43
| 334,984,118
| 634
| 107
|
MIT
| 2023-09-11T15:23:48
| 2021-02-01T14:55:41
|
Python
|
UTF-8
|
Python
| false
| false
| 367
|
py
|
__init__.py
|
from .cub import CUB
from .danish_fungi import DanishFungi
from .easy_set import EasySet
from .features_dataset import FeaturesDataset
from .few_shot_dataset import FewShotDataset
from .mini_imagenet import MiniImageNet
from .support_set_folder import SupportSetFolder
from .tiered_imagenet import TieredImageNet
from .wrap_few_shot_dataset import WrapFewShotDataset
|
fa9fe327462772d5a2469df6e1cbbab3f8d87cd0
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/text/__init__.py
|
e22fa1d34a1c5bb5bcea5b93c295a4728fcc9cc1
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 36
|
py
|
__init__.py
|
"""Tests for the text component."""
|
4bbe1e8bc52f156c4e6108f6a84a3f47c2bcc60e
|
3a3e715407bff57f7811356ddbb58d097d3ce6ab
|
/smartsheet/models/automation_action.py
|
8363bf4b6ea13ed926255a74fd25a4fb4d94ae38
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
smartsheet-platform/smartsheet-python-sdk
|
cc72585df2290ce89adc236f2c38e04717b75269
|
e32cef3c7faeba30a4cec37f9ac15b5a1d72cc83
|
refs/heads/master
| 2023-01-12T19:22:31.806414
| 2022-12-06T16:35:04
| 2022-12-06T16:35:04
| 28,953,329
| 137
| 94
|
Apache-2.0
| 2023-01-19T12:35:26
| 2015-01-08T06:59:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,521
|
py
|
automation_action.py
|
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from .enums import AutomationActionFrequency, AutomationActionType
from .recipient import Recipient
from ..types import *
from ..util import serialize
from ..util import deserialize
class AutomationAction(object):
"""Smartsheet AutomationAction data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the AutomationAction model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._frequency = EnumeratedValue(AutomationActionFrequency)
self._include_all_columns = Boolean()
self._include_attachments = Boolean()
self._include_discussions = Boolean()
self._included_column_ids = TypedList(six.integer_types)
self._message = String()
self._notify_all_shared_users = Boolean()
self._recipient_column_ids = TypedList(six.integer_types)
self._recipients = TypedList(Recipient)
self._subject = String()
self._type_ = EnumeratedValue(AutomationActionType)
if props:
deserialize(self, props)
# requests package Response object
self.request_response = None
self.__initialized = True
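    # Alias the public attribute name 'type' to the internal 'type_' property,
    # so callers can read and write automation_action.type directly.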
def __getattr__(self, key):
if key == 'type':
return self.type_
else:
raise AttributeError(key)
def __setattr__(self, key, value):
if key == 'type':
self.type_ = value
else:
super(AutomationAction, self).__setattr__(key, value)
@property
def frequency(self):
return self._frequency
@frequency.setter
def frequency(self, value):
self._frequency.set(value)
@property
def include_all_columns(self):
return self._include_all_columns.value
@include_all_columns.setter
def include_all_columns(self, value):
self._include_all_columns.value = value
@property
def include_attachments(self):
return self._include_attachments.value
@include_attachments.setter
def include_attachments(self, value):
self._include_attachments.value = value
@property
def include_discussions(self):
return self._include_discussions.value
@include_discussions.setter
def include_discussions(self, value):
self._include_discussions.value = value
@property
def included_column_ids(self):
return self._included_column_ids
@included_column_ids.setter
def included_column_ids(self, value):
self._included_column_ids.load(value)
@property
def message(self):
return self._message.value
@message.setter
def message(self, value):
self._message.value = value
@property
def notify_all_shared_users(self):
return self._notify_all_shared_users.value
@notify_all_shared_users.setter
def notify_all_shared_users(self, value):
self._notify_all_shared_users.value = value
@property
def recipient_column_ids(self):
return self._recipient_column_ids
@recipient_column_ids.setter
def recipient_column_ids(self, value):
self._recipient_column_ids.load(value)
@property
def recipients(self):
return self._recipients
@recipients.setter
def recipients(self, value):
self._recipients.load(value)
@property
def subject(self):
return self._subject.value
@subject.setter
def subject(self, value):
self._subject.value = value
@property
def type_(self):
return self._type_
@type_.setter
def type_(self, value):
self._type_.set(value)
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
|
939f9e8ba8411df912d91105e69a70295d1f0cbb
|
a0b346b88c3c481a829016773e0a6f2b0004d659
|
/python/integration-tests/compiler/var_length_short_calculation_test.py
|
590c6301740d7b6f4a1f595ee9cedac420a1c113
|
[
"BSD-3-Clause",
"Zlib",
"Python-2.0",
"Apache-2.0",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
KeyviDev/keyvi
|
4c5f512e2e13f5f8e21e82b91c1443c271b70b91
|
3a816037a74da049b01b522f42a6fdfb61f00a4a
|
refs/heads/master
| 2023-01-11T03:44:18.571671
| 2022-12-14T20:20:56
| 2022-12-14T20:20:56
| 109,309,657
| 235
| 42
|
Apache-2.0
| 2022-12-29T19:16:08
| 2017-11-02T19:28:41
|
C++
|
UTF-8
|
Python
| false
| false
| 950
|
py
|
var_length_short_calculation_test.py
|
# -*- coding: utf-8 -*-
# Usage: py.test tests
import sys
import os
import json
from keyvi.compiler import JsonDictionaryCompiler
root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(root, "../../tests"))
from test_tools import tmp_dictionary
def test_input_output_keys():
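    # The compiled dictionary should yield exactly as many items as were added,
    # even with zlib value compression and a small in-memory compile limit.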
compiler = JsonDictionaryCompiler({'compression_threshold': '32', 'compression': 'zlib', "memory_limit_mb":"10"})
input_keys_count = 0
with open(os.path.join(root, 'var_length_short_calculation_test_data.tsv')) as f_in:
for line in f_in:
k, v = line.split('\t')
key = json.loads(k)
value = json.loads(v)
compiler.Add(key, value)
input_keys_count += 1
output_keys_count = 0
with tmp_dictionary(compiler, 'var_length_short_test.kv') as d:
for _ in d.GetAllItems():
output_keys_count += 1
assert input_keys_count == output_keys_count
|
477987614cfcbf8c30b449e17ff991337a65db2f
|
5a6ccde5f37cc86b6fc0812b2bf40f42eab23906
|
/C-set/606C.Sorting Railway Cars.py
|
7902da89a86a38066e74ca84c77d3e5c3062780d
|
[] |
no_license
|
Waqar-107/Codeforces
|
23f2b1edffb85f6f020107f03e09a455d3e6e792
|
f0d2f25aa6a09c06083b82c39cdf3288ec2eecba
|
refs/heads/master
| 2023-03-09T07:55:46.583363
| 2023-03-04T09:57:44
| 2023-03-04T09:57:44
| 82,915,896
| 196
| 138
| null | 2023-02-11T22:06:20
| 2017-02-23T10:29:34
|
C++
|
UTF-8
|
Python
| false
| false
| 289
|
py
|
606C.Sorting Railway Cars.py
|
# from dust i have come dust i will be
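# mp[v] stores the 1-based position of car v; the longest run of consecutive
# values whose positions are already increasing can stay in place, so the
# minimum number of moves is n minus that run length.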
n=int(input())
a=list(map(int,input().split()))
mp=[0]*(n+1)
for i in range(n):
mp[a[i]]=i+1
cnt=0;mx=1
for i in range(1,n):
if mp[i]<mp[i+1]:
cnt+=1
else:
mx=max(mx,cnt+1)
cnt=0
mx=max(mx,cnt+1)
print(n-mx)
|
49b93e5566eea2a3eb5563a98c3f7e0e276d2e2b
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/python/ql/src/Classes/EqualsOrNotEquals.py
|
7e1ece7685c52f5aaaf8754bb2a5bb150e6bf419
|
[
"MIT"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059
| 2023-08-18T14:33:32
| 2023-08-18T14:33:32
| 143,040,428
| 5,987
| 1,363
|
MIT
| 2023-09-14T19:36:50
| 2018-07-31T16:35:51
|
CodeQL
|
UTF-8
|
Python
| false
| false
| 863
|
py
|
EqualsOrNotEquals.py
|
class PointOriginal(object):
def __init__(self, x, y):
        self._x = x
self._y = y
def __repr__(self):
return 'Point(%r, %r)' % (self._x, self._y)
def __eq__(self, other): # Incorrect: equality is defined but inequality is not
        if not isinstance(other, PointOriginal):
return False
return self._x == other._x and self._y == other._y
class PointUpdated(object):
def __init__(self, x, y):
        self._x = x
self._y = y
def __repr__(self):
return 'Point(%r, %r)' % (self._x, self._y)
def __eq__(self, other):
        if not isinstance(other, PointUpdated):
return False
return self._x == other._x and self._y == other._y
def __ne__(self, other): # Improved: equality and inequality method defined (hash method still missing)
return not self == other
|
e11ae7aeb236a94789ceb73f4cc7161c82c10619
|
019f03d6713a2bc5344b644aeb5ebe70aaf7cfd0
|
/src/super_gradients/training/datasets/segmentation_datasets/segmentation_dataset.py
|
976a3c50ffb3698d2e1a1e3da5cc378a8c5ffbf3
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
Deci-AI/super-gradients
|
6f52cd15bc2f9f39e3cdc6067292b6512aba5dd0
|
7240726cf6425b53a26ed2faec03672f30fee6be
|
refs/heads/master
| 2023-08-25T17:47:02.595029
| 2023-08-24T11:50:50
| 2023-08-24T11:50:50
| 432,652,408
| 3,237
| 331
|
Apache-2.0
| 2023-09-14T11:24:46
| 2021-11-28T07:58:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,572
|
py
|
segmentation_dataset.py
|
import os
from typing import Callable, Iterable
import numpy as np
import torch
import torchvision.transforms as transform
from PIL import Image
from tqdm import tqdm
from super_gradients.common.object_names import Datasets
from super_gradients.common.registry.registry import register_dataset
from super_gradients.common.decorators.factory_decorator import resolve_param
from super_gradients.common.factories.transforms_factory import TransformsFactory
from super_gradients.training.datasets.sg_dataset import DirectoryDataSet, ListDataset
@register_dataset(Datasets.SEGMENTATION_DATASET)
class SegmentationDataSet(DirectoryDataSet, ListDataset):
@resolve_param("transforms", factory=TransformsFactory())
def __init__(
self,
root: str,
list_file: str = None,
samples_sub_directory: str = None,
targets_sub_directory: str = None,
cache_labels: bool = False,
cache_images: bool = False,
collate_fn: Callable = None,
target_extension: str = ".png",
transforms: Iterable = None,
):
"""
SegmentationDataSet
:param root: Root folder of the Data Set
:param list_file: Path to the file with the samples list
:param samples_sub_directory: name of the samples sub-directory
:param targets_sub_directory: name of the targets sub-directory
:param cache_labels: "Caches" the labels -> Pre-Loads to memory as a list
:param cache_images: "Caches" the images -> Pre-Loads to memory as a list
:param collate_fn: collate_fn func to process batches for the Data Loader
:param target_extension: file extension of the targets (default is .png for PASCAL VOC 2012)
:param transforms: transforms to be applied on image and mask
"""
self.samples_sub_directory = samples_sub_directory
self.targets_sub_directory = targets_sub_directory
self.cache_labels = cache_labels
self.cache_images = cache_images
# CREATE A DIRECTORY DATASET OR A LIST DATASET BASED ON THE list_file INPUT VARIABLE
if list_file is not None:
ListDataset.__init__(
self,
root=root,
file=list_file,
target_extension=target_extension,
sample_loader=self.sample_loader,
sample_transform=self.sample_transform,
target_loader=self.target_loader,
target_transform=self.target_transform,
collate_fn=collate_fn,
)
else:
DirectoryDataSet.__init__(
self,
root=root,
samples_sub_directory=samples_sub_directory,
targets_sub_directory=targets_sub_directory,
target_extension=target_extension,
sample_loader=self.sample_loader,
sample_transform=self.sample_transform,
target_loader=self.target_loader,
target_transform=self.target_transform,
collate_fn=collate_fn,
)
self.transforms = transform.Compose(transforms if transforms else [])
def __getitem__(self, index):
sample_path, target_path = self.samples_targets_tuples_list[index]
# TRY TO LOAD THE CACHED IMAGE FIRST
if self.cache_images:
sample = self.imgs[index]
else:
sample = self.sample_loader(sample_path)
# TRY TO LOAD THE CACHED LABEL FIRST
if self.cache_labels:
target = self.labels[index]
else:
target = self.target_loader(target_path)
# MAKE SURE THE TRANSFORM WORKS ON BOTH IMAGE AND MASK TO ALIGN THE AUGMENTATIONS
sample, target = self._transform_image_and_mask(sample, target)
return self.sample_transform(sample), self.target_transform(target)
@staticmethod
def sample_loader(sample_path: str) -> Image:
"""
sample_loader - Loads a dataset image from path using PIL
:param sample_path: The path to the sample image
:return: The loaded Image
"""
image = Image.open(sample_path).convert("RGB")
return image
@staticmethod
def sample_transform(image):
"""
sample_transform - Transforms the sample image
:param image: The input image to transform
:return: The transformed image
"""
sample_transform = transform.Compose([transform.ToTensor(), transform.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
return sample_transform(image)
@staticmethod
def target_loader(target_path: str) -> Image:
"""
target_loader
:param target_path: The path to the sample image
:return: The loaded Image
"""
target = Image.open(target_path)
return target
@staticmethod
def target_transform(target):
"""
target_transform - Transforms the sample image
:param target: The target mask to transform
:return: The transformed target mask
"""
return torch.from_numpy(np.array(target)).long()
def _generate_samples_and_targets(self):
"""
_generate_samples_and_targets
"""
# IF THE DERIVED CLASS DID NOT IMPLEMENT AN EXPLICIT _generate_samples_and_targets CHILD METHOD
if not self.samples_targets_tuples_list:
super()._generate_samples_and_targets()
# EXTRACT THE LABELS FROM THE TUPLES LIST
image_files, label_files = map(list, zip(*self.samples_targets_tuples_list))
image_indices_to_remove = []
# CACHE IMAGES INTO MEMORY FOR FASTER TRAINING (WARNING: LARGE DATASETS MAY EXCEED SYSTEM RAM)
if self.cache_images:
            # CREATE AN EMPTY LIST FOR THE IMAGES
self.imgs = len(self) * [None]
cached_images_mem_in_gb = 0.0
with tqdm(image_files, desc="Caching images") as pbar:
for i, img_path in enumerate(pbar):
img = self.sample_loader(img_path)
if img is None:
image_indices_to_remove.append(i)
cached_images_mem_in_gb += os.path.getsize(image_files[i]) / 1024.0**3.0
self.imgs[i] = img
pbar.desc = "Caching images (%.1fGB)" % (cached_images_mem_in_gb)
self.img_files = [e for i, e in enumerate(image_files) if i not in image_indices_to_remove]
self.imgs = [e for i, e in enumerate(self.imgs) if i not in image_indices_to_remove]
# CACHE LABELS INTO MEMORY FOR FASTER TRAINING - RELEVANT FOR EFFICIENT VALIDATION RUNS DURING TRAINING
if self.cache_labels:
# CREATE AN EMPTY LIST FOR THE LABELS
self.labels = len(self) * [None]
with tqdm(label_files, desc="Caching labels") as pbar:
missing_labels, found_labels, duplicate_labels = 0, 0, 0
for i, file in enumerate(pbar):
labels = self.target_loader(file)
if labels is None:
missing_labels += 1
image_indices_to_remove.append(i)
continue
self.labels[i] = labels
found_labels += 1
pbar.desc = "Caching labels (%g found, %g missing, %g duplicate, for %g images)" % (
found_labels,
missing_labels,
duplicate_labels,
len(image_files),
)
assert found_labels > 0, "No labels found."
# REMOVE THE IRRELEVANT ENTRIES FROM THE DATA
self.label_files = [e for i, e in enumerate(label_files) if i not in image_indices_to_remove]
self.labels = [e for i, e in enumerate(self.labels) if i not in image_indices_to_remove]
def _transform_image_and_mask(self, image, mask) -> tuple:
"""
:param image: The input image
:param mask: The input mask
:return: The transformed image, mask
"""
transformed = self.transforms({"image": image, "mask": mask})
return transformed["image"], transformed["mask"]
|
6ba29721f1029050e11c16e7d125abdd4a240426
|
805a7b7574314415696ec97d45689e69bc7caaca
|
/gspread_pandas/conf.py
|
09db21e081be45b5c50353590190247dbe9bccd8
|
[
"BSD-3-Clause"
] |
permissive
|
aiguofer/gspread-pandas
|
a5b076606da82ec2e805c0f2dc4f82580f89b437
|
9cd68f554d863a5d9cd9d67f0c368ce21095afd4
|
refs/heads/master
| 2023-01-13T07:03:50.071636
| 2022-10-07T19:40:34
| 2022-10-07T19:40:34
| 70,650,276
| 373
| 57
|
BSD-3-Clause
| 2023-08-31T05:18:38
| 2016-10-12T01:29:35
|
Python
|
UTF-8
|
Python
| false
| false
| 5,894
|
py
|
conf.py
|
import json
import sys
from os import environ, name
from pathlib import Path
from google.oauth2.credentials import Credentials as OAuthCredentials
from google.oauth2.service_account import Credentials as SACredentials
from google_auth_oauthlib.flow import InstalledAppFlow
from gspread_pandas.exceptions import ConfigException
from gspread_pandas.util import decode
__all__ = ["default_scope", "get_config", "get_creds"]
if name == "nt":
_default_dir = Path(environ.get("APPDATA")) / "gspread_pandas"
else:
_default_dir = (
Path(environ.get("XDG_CONFIG_HOME", Path(environ.get("HOME", "")) / ".config"))
/ "gspread_pandas"
)
_default_file = "google_secret.json"
default_scope = [
"openid",
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/userinfo.email",
"https://www.googleapis.com/auth/spreadsheets",
]
CONFIG_DIR_ENV_VAR = "GSPREAD_PANDAS_CONFIG_DIR"
def get_config_dir():
"""
Get the config directory.
It will first look in the environment variable
GSPREAD_PANDAS_CONFIG_DIR, but if it's not set it'll use
~/.config/gspread_pandas
"""
return Path(environ.get(CONFIG_DIR_ENV_VAR, _default_dir))
def ensure_path(full_path):
"""
Create path if it doesn't exist.
Parameters
----------
full_path : str
Path to create if needed
Returns
-------
None
"""
full_path = Path(full_path)
if not full_path.exists():
full_path.mkdir(parents=True, exist_ok=True)
def get_config(conf_dir=None, file_name=_default_file):
"""
Get config for Google client. Looks in ~/.config/gspread_pandas/google_secret.json
by default but you can override it with conf_dir and file_name. The creds_dir value
will be set to conf_dir/creds and the directory will be created if it doesn't exist;
if you'd like to override that you can do so by changing the 'creds_dir' value in
the dict returned by this function.
Download json from https://console.developers.google.com/apis/credentials
Parameters
----------
conf_dir : str
Full path to config dir (Default value = get_config_dir())
file_name : str
(Default value = "google_secret.json")
Returns
-------
dict
Dict with necessary contents of google_secret.json
"""
conf_dir = Path(conf_dir).expanduser() if conf_dir else get_config_dir()
cfg_file = conf_dir / file_name
if not cfg_file.exists():
raise IOError(
"No Google client config found.\n"
"Please download json from "
"https://console.developers.google.com/apis/credentials and "
"save as {}".format(cfg_file)
)
with cfg_file.open() as fp:
cfg = json.load(fp)
return cfg
def get_creds(
user="default", config=None, scope=default_scope, creds_dir=None, save=True
):
"""
    Get google.auth.credentials.Credentials for the given user. If the user
    doesn't have previous creds, they will go through the OAuth flow to get new
    credentials which will be saved for later use. Credentials will be saved in
    config['creds_dir']; if this value is not set, then they will be stored in a
    folder named ``creds`` in the default config dir (either
    ~/.config/gspread_pandas or $GSPREAD_PANDAS_CONFIG_DIR).
Alternatively, it will get credentials from a service account.
Parameters
----------
user : str
Unique key indicating user's credentials. This is not necessary when using
a ServiceAccount and will be ignored (Default value = "default")
config : dict
Optional, dict with "client_id", "client_secret", and "redirect_uris" keys for
OAuth or "type", "client_email", "private_key", "private_key_id", and
"client_id" for a Service Account. If None is passed, it will call
:meth:`get_config() <get_config>` (Default value = None)
creds_dir : str, Path
Optional, directory to load and store creds from/in. If None, it will use the
``creds`` subdirectory in the default config location. (Default value = None)
scope : list
Optional, scope to use for Google Auth (Default value = default_scope)
Returns
-------
google.auth.credentials.Credentials
Google credentials that can be used with gspread
"""
config = config or get_config()
try:
if "private_key_id" in config:
return SACredentials.from_service_account_info(config, scopes=scope)
if not isinstance(user, str):
raise ConfigException(
"Need to provide a user key as a string if not using a service account"
)
if creds_dir is None:
creds_dir = get_config_dir() / "creds"
creds_file = Path(creds_dir) / user
if creds_file.exists():
# need to convert Path to string for python 2.7
return OAuthCredentials.from_authorized_user_file(str(creds_file))
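        # No cached credentials for this user: run the OAuth installed-app flow
        # on a local server and, optionally, persist the resulting credentials.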
flow = InstalledAppFlow.from_client_config(config, scope)
creds = flow.run_local_server(
host="localhost",
port=8182,
authorization_prompt_message="Please visit this URL: {url}",
success_message="The auth flow is complete; you may close this window.",
open_browser=False,
)
if save:
creds_data = {
"refresh_token": creds.refresh_token,
"token_uri": creds.token_uri,
"client_id": creds.client_id,
"client_secret": creds.client_secret,
"scopes": creds.scopes,
}
ensure_path(creds_dir)
creds_file.write_text(decode(json.dumps(creds_data)))
return creds
except Exception:
exc_info = sys.exc_info()
raise ConfigException(*exc_info[1:])
|
76c04dc1717b8505e950abc724d79c53d5af124c
|
41db6c672362ccafdd28af40ecf7df51ffa90a15
|
/guppy/heapy/test/test_dependencies.py
|
ebf626ea9adc9271ebea3ef14ef4efe2bbd01ccf
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
zhuyifei1999/guppy3
|
e5649a066199a92dc3bf8e77bdd6ffb44e790449
|
1b2db87af36388e43afd6ce5774b869bcc4a9452
|
refs/heads/master
| 2023-06-24T23:03:52.612921
| 2023-06-17T01:18:06
| 2023-06-17T01:54:06
| 202,611,372
| 371
| 24
|
MIT
| 2023-06-17T01:54:09
| 2019-08-15T21:05:15
|
Python
|
UTF-8
|
Python
| false
| false
| 214
|
py
|
test_dependencies.py
|
# Test the libraries we are dependent on
# Only sets right now.
def test_main(debug=0):
print('Testing sets')
from guppy.sets import test
test.test_main()
if __name__ == "__main__":
test_main()
|
923b9481822c3895d329a34c1d8fb4018be8df4f
|
88dbb103b21489932ff1998e0565470a0f8249f9
|
/textshot/logger.py
|
8f6eb811ef8e948f1aed229aa93b57d8dbb91e8a
|
[
"MIT"
] |
permissive
|
ianzhao05/textshot
|
ffd6b9fd00a8c090984acac3e7ff856263cab827
|
6481a291369d19551013ba0aa3d601b922e25869
|
refs/heads/master
| 2023-01-30T14:56:32.586991
| 2023-01-21T16:21:10
| 2023-01-21T16:21:10
| 265,975,639
| 1,723
| 292
|
MIT
| 2023-01-21T16:21:11
| 2020-05-21T23:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 361
|
py
|
logger.py
|
from .messages import ocr_failure_message
def log_copied(copied):
print(f'INFO: Copied "{copied}" to the clipboard')
def log_ocr_failure():
"""OCR didn't recognise text."""
print_error(ocr_failure_message)
def log_ocr_error(error):
"""OCR produced an error."""
print_error(error)
def print_error(error):
print(f"ERROR: {error}")
|
eba8d0666bad291cee0e81a1a98f63913b10e4b3
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/racing/DistributedRacePad.py
|
0aab464f65c9b70b7a9d55a562db945d7c03e843
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 11,491
|
py
|
DistributedRacePad.py
|
from panda3d.core import *
from direct.directnotify import DirectNotifyGlobal
from direct.task.Task import Task
from direct.distributed.ClockDelta import *
from direct.fsm.FSM import FSM
from direct.interval.IntervalGlobal import *
from toontown.racing.DistributedKartPad import DistributedKartPad
from toontown.racing import RaceGlobals
from toontown.toonbase.ToontownTimer import ToontownTimer
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.racing.KartShopGlobals import KartGlobals
class DistributedRacePad(DistributedKartPad, FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedRacePad')
defaultTransitions = {'Off': ['WaitEmpty'],
'WaitEmpty': ['WaitCountdown', 'Off'],
'WaitCountdown': ['WaitEmpty',
'WaitBoarding',
'Off',
'AllAboard'],
'WaitBoarding': ['AllAboard', 'WaitEmpty', 'Off'],
'AllAboard': ['Off', 'WaitEmpty', 'WaitCountdown']}
id = 0
def __init__(self, cr):
self.cr = cr
DistributedKartPad.__init__(self, cr)
FSM.__init__(self, 'RacePad_%s_FSM' % self.id)
self.id = DistributedRacePad.id
DistributedRacePad.id += 1
self.trackId = None
self.trackType = None
self.timeStamp = None
self.clockNodepath = None
self.timerTask = None
self.tunnelSign = None
self.trackNameNode = None
self.tunnelSignInterval = None
return
def disable(self):
self.notify.debug('Disable')
self.ignore('enterPlayground')
self.request('Off')
if self.tunnelSignInterval:
self.tunnelSignInterval = None
DistributedKartPad.disable(self)
return
def enableStartingBlocks(self):
self.notify.debug('Enabling starting blocks')
for block in self.startingBlocks:
block.setActive(0)
def disableStartingBlocks(self):
for block in self.startingBlocks:
self.notify.debug('Disabling kart block: %s' % block.getDoId())
block.setActive(1)
def isPractice(self):
return self.trackType == RaceGlobals.Practice
def setState(self, state, timestamp):
self.request(state, [timestamp])
def setRaceZone(self, zoneId):
for block in self.startingBlocks:
if block.avId == base.localAvatar.getDoId():
hoodId = self.cr.playGame.hood.hoodId
self.cr.playGame.getPlace().doneStatus = {'loader': 'racetrack',
'where': 'racetrack',
'zoneId': zoneId,
'trackId': self.trackId,
'hoodId': hoodId}
messenger.send(base.cr.playGame.getPlace().doneEvent)
def setTrackInfo(self, trackInfo):
if self.isDisabled():
return
self.trackId, self.trackType = trackInfo
self.notify.debugStateCall(self)
self.setTunnelSignText()
self.ignore('enterPlayground')
self.acceptOnce('enterPlayground', self.setTunnelSignText)
def enterOff(self):
self.notify.debug('enterOff: Entering Off State for RacePad %s' % self.id)
if self.tunnelSignInterval:
self.tunnelSignInterval.finish()
self.cleanupTunnelText()
def exitOff(self):
self.notify.debug('exitOff: Exiting Off state for RacePad %s' % self.id)
def enterWaitEmpty(self, args):
self.notify.debug('enterWaitEmpty: Entering WaitEmpty State for RacePad %s' % self.id)
if self.tunnelSignInterval:
self.tunnelSignInterval.finish()
def exitWaitEmpty(self):
self.notify.debug('exitWaitEmpty: Exiting WaitEmpty State for RacePad %s' % self.id)
def enterWaitCountdown(self, args):
self.notify.debug('enterWaitCountdown: Entering WaitCountdown State for RacePad %s' % self.id)
self.timeStamp = args[0]
self.startCountdown()
def exitWaitCountdown(self):
self.notify.debug('exitWaitCountdown: Exiting WaitCountdown State for RacePad %s' % self.id)
self.stopCountdown()
def enterWaitBoarding(self, args):
self.notify.debug('enterWaitBoarding: Entering WaitBoarding State for RacePad %s' % self.id)
self.timeStamp = args[0]
for block in self.startingBlocks:
block.hideGui()
def exitWaitBoarding(self):
        self.notify.debug('exitWaitBoarding: Exiting WaitBoarding State for RacePad %s' % self.id)
def enterAllAboard(self, args):
self.notify.debug('enterAllAboard: Entering AllAboard State for RacePad %s' % self.id)
for block in self.startingBlocks:
block.request('Off')
if block.av and block.kartNode:
self.notify.debug('enterAllAboard: Avatar %s is in the race.' % block.av.doId)
block.doExitToRaceTrack()
def exitAllAboard(self):
        self.notify.debug('exitAllAboard: Exiting AllAboard State for RacePad %s' % self.id)
def getTimestamp(self, avId = None):
error = 'DistributedRacePad::getTimeStamp - timestamp not yet set!'
return self.timeStamp
def stopCountdown(self):
if self.timerTask:
taskMgr.remove(self.timerTask)
self.clockNodepath.removeNode()
self.clockNodepath = None
self.clockNode = None
self.timerTask = None
return
def updateTimerTask(self, task):
countdownTime = int(task.duration - task.time)
timeStr = str(countdownTime)
if self.clockNode.getText() != timeStr:
self.clockNode.setText(timeStr)
if task.time >= task.duration:
return Task.done
else:
return Task.cont
def startCountdown(self):
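        # Remaining countdown = the global countdown window minus the time already
        # elapsed since the server timestamp (synced via globalClockDelta).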
if not self.timerTask and self.startingBlocks:
self.makeClockGui()
duration = KartGlobals.COUNTDOWN_TIME - globalClockDelta.localElapsedTime(self.getTimestamp())
countdownTask = Task(self.updateTimerTask)
countdownTask.duration = duration
self.timerTask = taskMgr.add(countdownTask, self.uniqueName('racePadTimerTask'))
def addStartingBlock(self, block):
DistributedKartPad.addStartingBlock(self, block)
if self.state == 'WaitCountdown':
self.startCountdown()
def makeClockGui(self):
self.notify.debugStateCall(self)
if self.clockNodepath is not None:
return
self.clockNode, self.clockNodepath = self.getSignTextNodes('racePadClock')
self.clockNodepath.setPos(0, 0.125, -3.0)
self.clockNodepath.setScale(2.5)
self.clockNodepath.flattenLight()
return
def getTunnelSign(self):
cPadId = RaceGlobals.RaceInfo2RacePadId(self.trackId, self.trackType)
genreId = RaceGlobals.getTrackGenre(self.trackId)
tunnelName = RaceGlobals.getTunnelSignName(genreId, cPadId)
self.tunnelSign = self.cr.playGame.hood.loader.geom.find('**/' + tunnelName)
def getSignTextNodes(self, nodeName, font = ToontownGlobals.getSignFont()):
signTextNode = TextNode(nodeName)
signTextNode.setFont(font)
signTextNode.setAlign(TextNode.ACenter)
signTextNode.setTextColor(0.5, 0.5, 0.5, 1)
signTextNodepath = self.tunnelSign.attachNewNode(signTextNode)
signTextNodepath.setPos(0, 0.25, 0)
signTextNodepath.setH(165.0)
signTextNodepath.setDepthWrite(0)
return (signTextNode, signTextNodepath)
def setTunnelSignText(self):
self.notify.debugStateCall(self)
self.getTunnelSign()
if not self.tunnelSign or self.tunnelSign.isEmpty():
return
if not self.trackNameNode:
self.makeTextNodes()
if self.tunnelSignInterval:
self.tunnelSignInterval.finish()
self.tunnelSignInterval = Sequence(Func(self.hideTunnelSignText), Wait(0.2), Func(self.showTunnelSignText), Wait(0.2), Func(self.hideTunnelSignText), Wait(0.2), Func(self.showTunnelSignText), Wait(0.2), Func(self.hideTunnelSignText), Wait(0.2), Func(self.updateTunnelSignText), Func(self.showTunnelSignText))
self.tunnelSignInterval.start()
def hideTunnelSignText(self):
if self.tunnelSign:
textNodePaths = self.tunnelSign.findAllMatches('**/+TextNode')
numTextNodePaths = textNodePaths.getNumPaths()
for i in range(numTextNodePaths):
textNodePath = textNodePaths.getPath(i)
textNodePath.hide()
def showTunnelSignText(self):
if self.tunnelSign:
textNodePaths = self.tunnelSign.findAllMatches('**/+TextNode')
numTextNodePaths = textNodePaths.getNumPaths()
for i in range(numTextNodePaths):
textNodePath = textNodePaths.getPath(i)
textNodePath.show()
def updateTunnelSignText(self):
self.notify.debugStateCall(self)
trackNameString = TTLocalizer.KartRace_TrackNames[self.trackId]
if not self.trackNameNode:
self.notify.warning('invalid trackNameNode, just returning')
return
self.trackNameNode.setText(trackNameString)
trackTypeString = TTLocalizer.KartRace_RaceNames[self.trackType]
self.trackTypeNode.setText(trackTypeString)
deposit = 0
if self.trackType:
deposit = RaceGlobals.getEntryFee(self.trackId, self.trackType)
depositString = TTLocalizer.KartRace_DepositPhrase + str(deposit)
self.depositNode.setText(depositString)
time = RaceGlobals.TrackDict[self.trackId][1]
secs, hundredths = divmod(time, 1)
min, sec = divmod(secs, 60)
timeText = '%02d:%02d:%02d' % (min, sec, hundredths * 100)
qualifyString = TTLocalizer.KartRace_QualifyPhrase + timeText
self.qualifyNode.setText(qualifyString)
def makeTextNodes(self):
self.notify.debugStateCall(self)
self.trackNameNode, trackNameNodePath = self.getSignTextNodes('trackNameNode')
trackNameNodePath.setZ(0.7)
trackNameNodePath.setScale(0.875)
trackNameNodePath.flattenLight()
self.trackTypeNode, trackTypeNodePath = self.getSignTextNodes('trackTypeNode')
trackTypeNodePath.setZ(-0.35)
trackTypeNodePath.setScale(0.875)
trackTypeNodePath.flattenLight()
self.depositNode, depositNodePath = self.getSignTextNodes('depositNode', ToontownGlobals.getToonFont())
self.depositNode.setTextColor(0, 0, 0, 1)
depositNodePath.setPos(4.0, -1.0, -2.0)
depositNodePath.setScale(0.75)
depositNodePath.flattenLight()
self.qualifyNode, qualifyNodePath = self.getSignTextNodes('qualifyNode', ToontownGlobals.getToonFont())
self.qualifyNode.setTextColor(0, 0, 0, 1)
qualifyNodePath.setPos(-4.0, 1.2, -2.0)
qualifyNodePath.setScale(0.75)
qualifyNodePath.flattenLight()
def cleanupTunnelText(self):
self.notify.debugStateCall(self)
if self.tunnelSign:
textNodePaths = self.tunnelSign.findAllMatches('**/+TextNode')
numTextNodePaths = textNodePaths.getNumPaths()
for i in range(numTextNodePaths):
textNodePath = textNodePaths.getPath(i)
textNodePath.removeNode()
textNodePath = None
self.tunnelSign = None
self.trackNameNode = None
return
|
cb882040141ec3b1d2102b8eb59aa64a6a3cfd9f
|
5f69a6549b8d5e417553d910622e6855b2ae679b
|
/src/opendr/control/multi_object_search/algorithm/SB3/type_aliases.py
|
0727481e3ef410d52b33c6417c614f13abd9b75e
|
[
"Apache-2.0"
] |
permissive
|
opendr-eu/opendr
|
822219f709613d77c5eb62c5d02808d344239835
|
b3d6ce670cdf63469fc5766630eb295d67b3d788
|
refs/heads/master
| 2023-08-31T07:02:36.375231
| 2023-08-29T06:39:51
| 2023-08-29T06:39:51
| 293,755,225
| 535
| 82
|
Apache-2.0
| 2023-09-13T16:53:34
| 2020-09-08T08:55:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
type_aliases.py
|
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
import torch as th
TensorDict = Dict[Union[str, int], th.Tensor]
class DictRolloutBufferSamples():
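    # Plain container bundling one batch of rollout samples together with the
    # auxiliary angle predictions and their ground-truth values.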
def __init__(self, observations, actions, old_values, old_log_prob, advantages, returns, aux_angle, aux_angle_gt):
self.observations = observations
self.actions = actions
self.old_values = old_values
self.old_log_prob = old_log_prob
self.advantages = advantages
self.returns = returns
self.aux_angle = aux_angle
self.aux_angle_gt = aux_angle_gt
|
c5bd324733212ad2d7a87f717f2ce16d3ec3f46f
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/magtu_ru/selection_of_specialty.py
|
87273d5d17fbf0f5c333fa30d9d8088adc891ceb
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
selection_of_specialty.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
"""Скрипт получает список специальностей в формате HTML, разбирает таблицу и оформляет ее в JSON"""
import json
import sys
import requests
from bs4 import BeautifulSoup
def element_to_text_list(el) -> str:
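    # Join every text fragment found inside the element into one comma-separated string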
return ", ".join([child.strip() for child in el.strings])
url = "http://magtu.ru/modules/mod_abiturient_helper/tmpl/get_spec.php"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0",
"X-Requested-With": "XMLHttpRequest",
}
# Russian language and Mathematics
post_data = {"data": "01,02"}
rs = requests.post(url, headers=headers, data=post_data)
if not rs.ok or not rs.text:
print("Post запрос не вернул данные таблицы. Возможно, не хватает каких-то данных.")
sys.exit()
root = BeautifulSoup(rs.text, "lxml")
table_rows = []
for tr in root.select("table tr")[1:]:
td_list = tr.select("td")
row_data = {
"number": element_to_text_list(td_list[0]),
"code": element_to_text_list(td_list[1]),
"speciality": element_to_text_list(td_list[2]),
"level_of_education": element_to_text_list(td_list[3]),
"budget_or_contract": element_to_text_list(td_list[4]),
"mode_of_study": element_to_text_list(td_list[5]),
"institute_faculty": element_to_text_list(td_list[6]),
"list_of_examinations": element_to_text_list(td_list[7]),
}
table_rows.append(row_data)
json_text = json.dumps(table_rows, indent=4, ensure_ascii=False, sort_keys=True)
print(json_text)
|
f5668d09d9e2e1b4bc220105b8a8443dcff06acb
|
5f179375aed694eff1a7e09162ad3661e5500cbe
|
/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/simple_encoder_test.py
|
27404cc0dd1edb1c0d8f2b5bdb2f2c71337025cf
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/model-optimization
|
615d6e970ff756e292204772c359e7f6f98c06c6
|
4733c85f21d1eb570fd575ea201cb211a485bfb0
|
refs/heads/master
| 2023-08-15T09:17:45.313544
| 2023-08-01T18:36:44
| 2023-08-01T18:37:12
| 155,619,942
| 1,550
| 338
|
Apache-2.0
| 2023-09-14T17:42:31
| 2018-10-31T20:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 10,119
|
py
|
simple_encoder_test.py
|
# Copyright 2019, The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
# TODO(b/139939526): Move to public API.
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow_model_optimization.python.core.internal.tensor_encoding.core import core_encoder
from tensorflow_model_optimization.python.core.internal.tensor_encoding.core import simple_encoder
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing import test_utils
# Abbreviated constants used in tests.
TENSORS = simple_encoder._TENSORS
P1_VALS = test_utils.PlusOneEncodingStage.ENCODED_VALUES_KEY
T2_VALS = test_utils.TimesTwoEncodingStage.ENCODED_VALUES_KEY
SIF_SIGNS = test_utils.SignIntFloatEncodingStage.ENCODED_SIGNS_KEY
SIF_INTS = test_utils.SignIntFloatEncodingStage.ENCODED_INTS_KEY
SIF_FLOATS = test_utils.SignIntFloatEncodingStage.ENCODED_FLOATS_KEY
PN_VALS = test_utils.PlusOneOverNEncodingStage.ENCODED_VALUES_KEY
class SimpleEncoderTest(tf.test.TestCase, parameterized.TestCase):
@tf_test_util.run_all_in_graph_and_eager_modes
def test_basic_encode_decode(self):
"""Tests basic encoding and decoding works as expected."""
x = tf.constant(1.0, tf.float32)
encoder = simple_encoder.SimpleEncoder(
core_encoder.EncoderComposer(
test_utils.PlusOneOverNEncodingStage()).make(),
tf.TensorSpec.from_tensor(x))
state = encoder.initial_state()
iteration = _make_iteration_function(encoder)
for i in range(1, 10):
x, encoded_x, decoded_x, state = self.evaluate(iteration(x, state))
self.assertAllClose(x, decoded_x)
self.assertAllClose(1.0 + 1 / i,
_encoded_x_field(encoded_x, [TENSORS, PN_VALS]))
@tf_test_util.run_all_in_graph_and_eager_modes
def test_composite_encoder(self):
"""Tests functionality with a general, composite `Encoder`."""
x = tf.constant(1.2)
encoder = core_encoder.EncoderComposer(
test_utils.SignIntFloatEncodingStage())
encoder.add_child(test_utils.TimesTwoEncodingStage(), SIF_SIGNS)
encoder.add_child(test_utils.PlusOneEncodingStage(), SIF_INTS)
encoder.add_child(test_utils.TimesTwoEncodingStage(), SIF_FLOATS).add_child(
test_utils.PlusOneOverNEncodingStage(), T2_VALS)
encoder = simple_encoder.SimpleEncoder(encoder.make(),
tf.TensorSpec.from_tensor(x))
state = encoder.initial_state()
iteration = _make_iteration_function(encoder)
for i in range(1, 10):
x, encoded_x, decoded_x, state = self.evaluate(iteration(x, state))
self.assertAllClose(x, decoded_x)
self.assertAllClose(
2.0, _encoded_x_field(encoded_x, [TENSORS, SIF_SIGNS, T2_VALS]))
self.assertAllClose(
2.0, _encoded_x_field(encoded_x, [TENSORS, SIF_INTS, P1_VALS]))
self.assertAllClose(
0.4 + 1 / i,
_encoded_x_field(encoded_x, [TENSORS, SIF_FLOATS, T2_VALS, PN_VALS]))
@tf_test_util.run_all_in_graph_and_eager_modes
def test_none_state_equal_to_initial_state(self):
"""Tests that not providing state is the same as initial_state."""
x = tf.constant(1.0)
encoder = simple_encoder.SimpleEncoder(
core_encoder.EncoderComposer(
test_utils.PlusOneOverNEncodingStage()).make(),
tf.TensorSpec.from_tensor(x))
state = encoder.initial_state()
stateful_iteration = _make_iteration_function(encoder)
@tf.function
def stateless_iteration(x):
encoded_x, _ = encoder.encode(x)
decoded_x = encoder.decode(encoded_x)
return encoded_x, decoded_x
_, encoded_x_stateful, decoded_x_stateful, _ = self.evaluate(
stateful_iteration(x, state))
encoded_x_stateless, decoded_x_stateless = self.evaluate(
stateless_iteration(x))
self.assertAllClose(encoded_x_stateful, encoded_x_stateless)
self.assertAllClose(decoded_x_stateful, decoded_x_stateless)
@tf_test_util.run_all_in_graph_and_eager_modes
def test_python_constants_not_exposed(self):
"""Tests that only TensorFlow values are exposed to users."""
x = tf.constant(1.0)
tensorspec = tf.TensorSpec.from_tensor(x)
encoder_py = simple_encoder.SimpleEncoder(
core_encoder.EncoderComposer(
test_utils.SimpleLinearEncodingStage(2.0, 3.0)).make(), tensorspec)
a_var = tf.compat.v1.get_variable('a_var', initializer=2.0)
b_var = tf.compat.v1.get_variable('b_var', initializer=3.0)
encoder_tf = simple_encoder.SimpleEncoder(
core_encoder.EncoderComposer(
test_utils.SimpleLinearEncodingStage(a_var, b_var)).make(),
tensorspec)
state_py = encoder_py.initial_state()
state_tf = encoder_tf.initial_state()
iteration_py = _make_iteration_function(encoder_py)
iteration_tf = _make_iteration_function(encoder_tf)
self.evaluate(tf.compat.v1.global_variables_initializer())
_, encoded_x_py, decoded_x_py, _ = self.evaluate(iteration_py(x, state_py))
_, encoded_x_tf, decoded_x_tf, _ = self.evaluate(iteration_tf(x, state_tf))
    # The encoded_x_tf should have two elements that encoded_x_py does not.
    # These correspond to the two variables created and passed to the constructor
    # of encoder_tf, which are exposed as params. For encoder_py, these are plain
    # Python constants, and should thus be hidden from users.
self.assertLen(encoded_x_tf, len(encoded_x_py) + 2)
# Make sure functionality is still the same.
self.assertAllClose(x, decoded_x_tf)
self.assertAllClose(x, decoded_x_py)
@tf_test_util.run_all_in_graph_and_eager_modes
def test_decode_needs_input_shape_static(self):
"""Tests that mechanism for passing input shape works with static shape."""
x = tf.reshape(list(range(15)), [3, 5])
encoder = simple_encoder.SimpleEncoder(
core_encoder.EncoderComposer(
test_utils.ReduceMeanEncodingStage()).make(),
tf.TensorSpec.from_tensor(x))
state = encoder.initial_state()
iteration = _make_iteration_function(encoder)
_, _, decoded_x, _ = self.evaluate(iteration(x, state))
self.assertAllEqual([[7.0] * 5] * 3, decoded_x)
def test_not_fully_defined_shape_raises(self):
"""Tests tensorspec without fully defined shape."""
encoder = core_encoder.EncoderComposer(
test_utils.PlusOneOverNEncodingStage()).make()
with self.assertRaisesRegex(TypeError, 'fully defined'):
simple_encoder.SimpleEncoder(encoder, tf.TensorSpec((None,), tf.float32))
@tf_test_util.run_all_in_graph_and_eager_modes
def test_input_signature_enforced(self):
"""Tests that encode/decode input signature is enforced."""
x = tf.constant(1.0)
encoder = simple_encoder.SimpleEncoder(
core_encoder.EncoderComposer(
test_utils.PlusOneOverNEncodingStage()).make(),
tf.TensorSpec.from_tensor(x))
state = encoder.initial_state()
with self.assertRaises((TypeError, ValueError)):
bad_x = tf.stack([x, x])
encoder.encode(bad_x, state)
with self.assertRaises((TypeError, ValueError)):
bad_state = state + (x,)
encoder.encode(x, bad_state)
encoded_x = encoder.encode(x, state)
with self.assertRaises(ValueError):
bad_encoded_x = dict(encoded_x)
bad_encoded_x.update({'x': x})
encoder.decode(bad_encoded_x)
def test_input_tensorspec(self):
"""Tests input_tensorspec property."""
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
encoder = simple_encoder.SimpleEncoder(
core_encoder.EncoderComposer(
test_utils.PlusOneOverNEncodingStage()).make(),
tf.TensorSpec.from_tensor(x))
self.assertTrue(encoder.input_tensorspec.is_compatible_with(x))
@parameterized.parameters([1.0, 'str', object])
def test_not_an_encoder_raises(self, not_an_encoder):
"""Tests invalid encoder argument."""
tensorspec = tf.TensorSpec((1,), tf.float32)
with self.assertRaisesRegex(TypeError, 'Encoder'):
simple_encoder.SimpleEncoder(not_an_encoder, tensorspec)
@parameterized.parameters([1.0, 'str', object])
def test_not_a_tensorspec_raises(self, not_a_tensorspec):
"""Tests invalid type of tensorspec argument."""
encoder = core_encoder.EncoderComposer(
test_utils.PlusOneOverNEncodingStage()).make()
with self.assertRaisesRegex(TypeError, 'TensorSpec'):
simple_encoder.SimpleEncoder(encoder, not_a_tensorspec)
def _make_iteration_function(encoder):
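  # Wraps a single encode/decode round trip in a tf.function so tests can
  # evaluate the original, encoded and decoded values plus the updated state.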
assert isinstance(encoder, simple_encoder.SimpleEncoder)
@tf.function
def iteration(x, state):
encoded_x, new_state = encoder.encode(x, state)
decoded_x = encoder.decode(encoded_x)
return x, encoded_x, decoded_x, new_state
return iteration
def _encoded_x_field(encoded_x, path):
"""Returns a field from `encoded_x` returned by the `encode` method.
In order to test the correctness of encoding, we also need to access the
encoded objects, which in turns depends on an implementation detail (the
specific use of `nest.flatten_with_joined_string_paths`). This dependence is
constrained to a single place in this utility.
Args:
encoded_x: The structure returned by the `encode` method.
path: A list of keys corresponding to the path in the nested dictionary
before it was flattened.
Returns:
A value from `encoded_x` corresponding to the `path`.
"""
return encoded_x['/'.join(path)]
if __name__ == '__main__':
tf.test.main()
|
2025e6204b11b2ec9f2f0fd64658daa11c7b99ee
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_cassandra_resources_operations.py
|
b79a366aba1a4efea57487f2a967cf601153683c
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 211,805
|
py
|
_cassandra_resources_operations.py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_cassandra_keyspaces_request(
resource_group_name: str, account_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_cassandra_keyspace_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_update_cassandra_keyspace_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_cassandra_keyspace_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)
def build_get_cassandra_keyspace_throughput_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_cassandra_keyspace_throughput_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_migrate_cassandra_keyspace_to_autoscale_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default/migrateToAutoscale",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_migrate_cassandra_keyspace_to_manual_throughput_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default/migrateToManualThroughput",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_cassandra_tables_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_cassandra_table_request(
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"tableName": _SERIALIZER.url("table_name", table_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_update_cassandra_table_request(
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"tableName": _SERIALIZER.url("table_name", table_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_cassandra_table_request(
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"tableName": _SERIALIZER.url("table_name", table_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)
def build_get_cassandra_table_throughput_request(
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"tableName": _SERIALIZER.url("table_name", table_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_cassandra_table_throughput_request(
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"tableName": _SERIALIZER.url("table_name", table_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_migrate_cassandra_table_to_autoscale_request(
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default/migrateToAutoscale",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"tableName": _SERIALIZER.url("table_name", table_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_migrate_cassandra_table_to_manual_throughput_request(
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default/migrateToManualThroughput",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"tableName": _SERIALIZER.url("table_name", table_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_cassandra_views_request(
resource_group_name: str, account_name: str, keyspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_cassandra_view_request(
resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"viewName": _SERIALIZER.url("view_name", view_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_update_cassandra_view_request(
resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"viewName": _SERIALIZER.url("view_name", view_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_cassandra_view_request(
resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"viewName": _SERIALIZER.url("view_name", view_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)
def build_get_cassandra_view_throughput_request(
resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"viewName": _SERIALIZER.url("view_name", view_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_cassandra_view_throughput_request(
resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"viewName": _SERIALIZER.url("view_name", view_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_migrate_cassandra_view_to_autoscale_request(
resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default/migrateToAutoscale",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"viewName": _SERIALIZER.url("view_name", view_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_migrate_cassandra_view_to_manual_throughput_request(
resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default/migrateToManualThroughput",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"keyspaceName": _SERIALIZER.url("keyspace_name", keyspace_name, "str"),
"viewName": _SERIALIZER.url("view_name", view_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class CassandraResourcesOperations: # pylint: disable=too-many-public-methods
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.cosmosdb.CosmosDBManagementClient`'s
:attr:`cassandra_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_cassandra_keyspaces(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> Iterable["_models.CassandraKeyspaceGetResults"]:
"""Lists the Cassandra keyspaces under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CassandraKeyspaceGetResults or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.CassandraKeyspaceGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CassandraKeyspaceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_cassandra_keyspaces_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_cassandra_keyspaces.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CassandraKeyspaceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_cassandra_keyspaces.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces"
}
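    # Hedged example (hypothetical names): the ItemPaged returned above drives
    # paging transparently, so plain iteration is enough.
    #
    #     for keyspace in client.cassandra_resources.list_cassandra_keyspaces(
    #         resource_group_name="example-rg", account_name="examplecosmosacct"
    #     ):
    #         print(keyspace.name)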
@distributed_trace
def get_cassandra_keyspace(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> _models.CassandraKeyspaceGetResults:
"""Gets the Cassandra keyspaces under an existing Azure Cosmos DB database account with the
provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CassandraKeyspaceGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.CassandraKeyspaceGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CassandraKeyspaceGetResults] = kwargs.pop("cls", None)
request = build_get_cassandra_keyspace_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_cassandra_keyspace.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("CassandraKeyspaceGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cassandra_keyspace.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}"
}
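    # Hedged example (hypothetical names):
    #
    #     ks = client.cassandra_resources.get_cassandra_keyspace(
    #         "example-rg", "examplecosmosacct", "ks1"
    #     )
    #     print(ks.id)  # full ARM resource id of the keyspace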
def _create_update_cassandra_keyspace_initial(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
create_update_cassandra_keyspace_parameters: Union[_models.CassandraKeyspaceCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.CassandraKeyspaceGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.CassandraKeyspaceGetResults]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_cassandra_keyspace_parameters, (IOBase, bytes)):
_content = create_update_cassandra_keyspace_parameters
else:
_json = self._serialize.body(
create_update_cassandra_keyspace_parameters, "CassandraKeyspaceCreateUpdateParameters"
)
request = build_create_update_cassandra_keyspace_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_cassandra_keyspace_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("CassandraKeyspaceGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_update_cassandra_keyspace_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}"
}
@overload
def begin_create_update_cassandra_keyspace(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
create_update_cassandra_keyspace_parameters: _models.CassandraKeyspaceCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.CassandraKeyspaceGetResults]:
"""Create or update an Azure Cosmos DB Cassandra keyspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param create_update_cassandra_keyspace_parameters: The parameters to provide for the current
Cassandra keyspace. Required.
:type create_update_cassandra_keyspace_parameters:
~azure.mgmt.cosmosdb.models.CassandraKeyspaceCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraKeyspaceGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraKeyspaceGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_update_cassandra_keyspace(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
create_update_cassandra_keyspace_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.CassandraKeyspaceGetResults]:
"""Create or update an Azure Cosmos DB Cassandra keyspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param create_update_cassandra_keyspace_parameters: The parameters to provide for the current
Cassandra keyspace. Required.
:type create_update_cassandra_keyspace_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraKeyspaceGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraKeyspaceGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_update_cassandra_keyspace(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
create_update_cassandra_keyspace_parameters: Union[_models.CassandraKeyspaceCreateUpdateParameters, IO],
**kwargs: Any
) -> LROPoller[_models.CassandraKeyspaceGetResults]:
"""Create or update an Azure Cosmos DB Cassandra keyspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param create_update_cassandra_keyspace_parameters: The parameters to provide for the current
Cassandra keyspace. Is either a CassandraKeyspaceCreateUpdateParameters type or an IO type.
Required.
:type create_update_cassandra_keyspace_parameters:
~azure.mgmt.cosmosdb.models.CassandraKeyspaceCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraKeyspaceGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraKeyspaceGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CassandraKeyspaceGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_update_cassandra_keyspace_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
create_update_cassandra_keyspace_parameters=create_update_cassandra_keyspace_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("CassandraKeyspaceGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_update_cassandra_keyspace.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}"
}
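    # Hedged example (hypothetical body and names): begin_* methods return an
    # LROPoller, and .result() blocks until the long-running PUT completes. A
    # JSON-shaped dict is assumed to be accepted in place of the model, as in
    # the SDK's generated samples.
    #
    #     poller = client.cassandra_resources.begin_create_update_cassandra_keyspace(
    #         "example-rg", "examplecosmosacct", "ks1",
    #         {
    #             "location": "West US",
    #             "properties": {"resource": {"id": "ks1"}, "options": {}},
    #         },
    #     )
    #     keyspace = poller.result()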
def _delete_cassandra_keyspace_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_cassandra_keyspace_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_cassandra_keyspace_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_cassandra_keyspace_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}"
}
@distributed_trace
def begin_delete_cassandra_keyspace(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Deletes an existing Azure Cosmos DB Cassandra keyspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_cassandra_keyspace_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
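# polling=True (the default) selects the standard ARM long-running-operation poller;
# polling=False disables polling entirely; any other value is treated as a
# caller-supplied PollingMethod instance.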
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_cassandra_keyspace.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}"
}
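# Illustrative usage sketch (not part of the generated client; resource names below are
# hypothetical). With an authenticated management client, deleting a keyspace and blocking
# until the long-running operation finishes might look like:
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.cosmosdb import CosmosDBManagementClient
#
#     client = CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.cassandra_resources.begin_delete_cassandra_keyspace(
#         resource_group_name="my-rg",
#         account_name="my-cosmos-account",
#         keyspace_name="my-keyspace",
#     )
#     poller.result()  # returns None once the delete has completed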
@distributed_trace
def get_cassandra_keyspace_throughput(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> _models.ThroughputSettingsGetResults:
"""Gets the RUs per second of the Cassandra Keyspace under an existing Azure Cosmos DB database
account with the provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
request = build_get_cassandra_keyspace_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_cassandra_keyspace_throughput.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cassandra_keyspace_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default"
}
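# Illustrative usage sketch (hypothetical resource names): reading the current throughput
# settings of a keyspace is a plain synchronous call returning ThroughputSettingsGetResults.
#
#     settings = client.cassandra_resources.get_cassandra_keyspace_throughput(
#         resource_group_name="my-rg",
#         account_name="my-cosmos-account",
#         keyspace_name="my-keyspace",
#     )
#     print(settings.resource.throughput)  # manual RU/s; autoscale accounts expose autoscale_settings instead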
def _update_cassandra_keyspace_throughput_initial(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
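# The update body may be supplied either as a model (serialized to JSON below) or as an
# already-encoded stream/bytes payload that is forwarded unchanged.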
if isinstance(update_throughput_parameters, (IOBase, bytes)):
_content = update_throughput_parameters
else:
_json = self._serialize.body(update_throughput_parameters, "ThroughputSettingsUpdateParameters")
request = build_update_cassandra_keyspace_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_cassandra_keyspace_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_update_cassandra_keyspace_throughput_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default"
}
@overload
def begin_update_cassandra_keyspace_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
update_throughput_parameters: _models.ThroughputSettingsUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra Keyspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra Keyspace. Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update_cassandra_keyspace_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
update_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra Keyspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra Keyspace. Required.
:type update_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update_cassandra_keyspace_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra Keyspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra Keyspace. Is either a ThroughputSettingsUpdateParameters type or an IO type.
Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_cassandra_keyspace_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
update_throughput_parameters=update_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update_cassandra_keyspace_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default"
}
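# Illustrative usage sketch (hypothetical resource names), assuming the keyspace uses
# provisioned (manual) throughput:
#
#     from azure.mgmt.cosmosdb.models import (
#         ThroughputSettingsResource,
#         ThroughputSettingsUpdateParameters,
#     )
#
#     poller = client.cassandra_resources.begin_update_cassandra_keyspace_throughput(
#         resource_group_name="my-rg",
#         account_name="my-cosmos-account",
#         keyspace_name="my-keyspace",
#         update_throughput_parameters=ThroughputSettingsUpdateParameters(
#             resource=ThroughputSettingsResource(throughput=1000),
#         ),
#     )
#     settings = poller.result()  # ThroughputSettingsGetResults with the applied RU/s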
def _migrate_cassandra_keyspace_to_autoscale_initial(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
request = build_migrate_cassandra_keyspace_to_autoscale_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_cassandra_keyspace_to_autoscale_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_migrate_cassandra_keyspace_to_autoscale_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default/migrateToAutoscale"
}
@distributed_trace
def begin_migrate_cassandra_keyspace_to_autoscale(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB Cassandra Keyspace from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._migrate_cassandra_keyspace_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_migrate_cassandra_keyspace_to_autoscale.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default/migrateToAutoscale"
}
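# Illustrative usage sketch (hypothetical resource names): migrating a keyspace from manual
# throughput to autoscale is also a long-running operation.
#
#     poller = client.cassandra_resources.begin_migrate_cassandra_keyspace_to_autoscale(
#         resource_group_name="my-rg",
#         account_name="my-cosmos-account",
#         keyspace_name="my-keyspace",
#     )
#     settings = poller.result()
#     # settings.resource.autoscale_settings is expected to be populated after the migration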
def _migrate_cassandra_keyspace_to_manual_throughput_initial(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
request = build_migrate_cassandra_keyspace_to_manual_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_cassandra_keyspace_to_manual_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_migrate_cassandra_keyspace_to_manual_throughput_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default/migrateToManualThroughput"
}
@distributed_trace
def begin_migrate_cassandra_keyspace_to_manual_throughput(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB Cassandra Keyspace from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._migrate_cassandra_keyspace_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_migrate_cassandra_keyspace_to_manual_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/throughputSettings/default/migrateToManualThroughput"
}
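# Illustrative usage sketch (hypothetical resource names): the reverse migration back to
# manual throughput follows the same pattern.
#
#     poller = client.cassandra_resources.begin_migrate_cassandra_keyspace_to_manual_throughput(
#         resource_group_name="my-rg",
#         account_name="my-cosmos-account",
#         keyspace_name="my-keyspace",
#     )
#     poller.result()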
@distributed_trace
def list_cassandra_tables(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> Iterable["_models.CassandraTableGetResults"]:
"""Lists the Cassandra table under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CassandraTableGetResults or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.CassandraTableGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CassandraTableListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_cassandra_tables_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_cassandra_tables.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CassandraTableListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
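# get_next performs the HTTP call for a page; ItemPaged drives extract_data and get_next
# lazily as the caller iterates.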
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_cassandra_tables.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables"
}
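# Illustrative usage sketch (hypothetical resource names): the returned ItemPaged is a lazy
# iterator; pages are fetched only as the caller consumes them.
#
#     for table in client.cassandra_resources.list_cassandra_tables(
#         resource_group_name="my-rg",
#         account_name="my-cosmos-account",
#         keyspace_name="my-keyspace",
#     ):
#         print(table.name)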
@distributed_trace
def get_cassandra_table(
self, resource_group_name: str, account_name: str, keyspace_name: str, table_name: str, **kwargs: Any
) -> _models.CassandraTableGetResults:
"""Gets the Cassandra table under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CassandraTableGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.CassandraTableGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CassandraTableGetResults] = kwargs.pop("cls", None)
request = build_get_cassandra_table_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_cassandra_table.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("CassandraTableGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cassandra_table.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}"
}
def _create_update_cassandra_table_initial(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
create_update_cassandra_table_parameters: Union[_models.CassandraTableCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.CassandraTableGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.CassandraTableGetResults]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_cassandra_table_parameters, (IOBase, bytes)):
_content = create_update_cassandra_table_parameters
else:
_json = self._serialize.body(
create_update_cassandra_table_parameters, "CassandraTableCreateUpdateParameters"
)
request = build_create_update_cassandra_table_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_cassandra_table_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("CassandraTableGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_update_cassandra_table_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}"
}
@overload
def begin_create_update_cassandra_table(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
create_update_cassandra_table_parameters: _models.CassandraTableCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.CassandraTableGetResults]:
"""Create or update an Azure Cosmos DB Cassandra Table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:param create_update_cassandra_table_parameters: The parameters to provide for the current
Cassandra Table. Required.
:type create_update_cassandra_table_parameters:
~azure.mgmt.cosmosdb.models.CassandraTableCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraTableGetResults or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraTableGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_update_cassandra_table(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
create_update_cassandra_table_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.CassandraTableGetResults]:
"""Create or update an Azure Cosmos DB Cassandra Table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:param create_update_cassandra_table_parameters: The parameters to provide for the current
Cassandra Table. Required.
:type create_update_cassandra_table_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraTableGetResults or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraTableGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_update_cassandra_table(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
create_update_cassandra_table_parameters: Union[_models.CassandraTableCreateUpdateParameters, IO],
**kwargs: Any
) -> LROPoller[_models.CassandraTableGetResults]:
"""Create or update an Azure Cosmos DB Cassandra Table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:param create_update_cassandra_table_parameters: The parameters to provide for the current
Cassandra Table. Is either a CassandraTableCreateUpdateParameters type or an IO type. Required.
:type create_update_cassandra_table_parameters:
~azure.mgmt.cosmosdb.models.CassandraTableCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraTableGetResults or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraTableGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CassandraTableGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_update_cassandra_table_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
create_update_cassandra_table_parameters=create_update_cassandra_table_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("CassandraTableGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_update_cassandra_table.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}"
}
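# Illustrative usage sketch (hypothetical names and schema), assuming the target keyspace
# already exists:
#
#     from azure.mgmt.cosmosdb.models import (
#         CassandraPartitionKey,
#         CassandraSchema,
#         CassandraTableCreateUpdateParameters,
#         CassandraTableResource,
#         Column,
#     )
#
#     poller = client.cassandra_resources.begin_create_update_cassandra_table(
#         resource_group_name="my-rg",
#         account_name="my-cosmos-account",
#         keyspace_name="my-keyspace",
#         table_name="my-table",
#         create_update_cassandra_table_parameters=CassandraTableCreateUpdateParameters(
#             resource=CassandraTableResource(
#                 id="my-table",
#                 schema=CassandraSchema(
#                     columns=[Column(name="id", type="uuid")],
#                     partition_keys=[CassandraPartitionKey(name="id")],
#                 ),
#             ),
#         ),
#     )
#     table = poller.result()  # CassandraTableGetResults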
def _delete_cassandra_table_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, account_name: str, keyspace_name: str, table_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_cassandra_table_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_cassandra_table_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_cassandra_table_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}"
}
@distributed_trace
def begin_delete_cassandra_table(
self, resource_group_name: str, account_name: str, keyspace_name: str, table_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Deletes an existing Azure Cosmos DB Cassandra table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_cassandra_table_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_cassandra_table.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}"
}
@distributed_trace
def get_cassandra_table_throughput(
self, resource_group_name: str, account_name: str, keyspace_name: str, table_name: str, **kwargs: Any
) -> _models.ThroughputSettingsGetResults:
"""Gets the RUs per second of the Cassandra table under an existing Azure Cosmos DB database
account with the provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
request = build_get_cassandra_table_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_cassandra_table_throughput.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cassandra_table_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default"
}
def _update_cassandra_table_throughput_initial(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(update_throughput_parameters, (IOBase, bytes)):
_content = update_throughput_parameters
else:
_json = self._serialize.body(update_throughput_parameters, "ThroughputSettingsUpdateParameters")
request = build_update_cassandra_table_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_cassandra_table_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_update_cassandra_table_throughput_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default"
}
@overload
def begin_update_cassandra_table_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
update_throughput_parameters: _models.ThroughputSettingsUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra table. Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update_cassandra_table_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
update_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra table. Required.
:type update_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update_cassandra_table_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
table_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra table. Is either a ThroughputSettingsUpdateParameters type or an IO type.
Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
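
Example (illustrative sketch, not part of the generated code: the credential,
subscription ID, resource names, and throughput value below are placeholders, and
the model field names are assumed from ``azure.mgmt.cosmosdb.models``):

.. code-block:: python

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.cosmosdb import CosmosDBManagementClient
    from azure.mgmt.cosmosdb.models import (
        ThroughputSettingsResource,
        ThroughputSettingsUpdateParameters,
    )

    client = CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
    poller = client.cassandra_resources.begin_update_cassandra_table_throughput(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        table_name="<table>",
        update_throughput_parameters=ThroughputSettingsUpdateParameters(
            resource=ThroughputSettingsResource(throughput=1000)
        ),
    )
    throughput_settings = poller.result()  # blocks until the update completes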
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_cassandra_table_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
update_throughput_parameters=update_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update_cassandra_table_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default"
}
def _migrate_cassandra_table_to_autoscale_initial(
self, resource_group_name: str, account_name: str, keyspace_name: str, table_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
request = build_migrate_cassandra_table_to_autoscale_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_cassandra_table_to_autoscale_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_migrate_cassandra_table_to_autoscale_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default/migrateToAutoscale"
}
@distributed_trace
def begin_migrate_cassandra_table_to_autoscale(
self, resource_group_name: str, account_name: str, keyspace_name: str, table_name: str, **kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB Cassandra table from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
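
Example (illustrative sketch; assumes ``client`` is an authenticated
``CosmosDBManagementClient`` as in the example on
:meth:`begin_update_cassandra_table_throughput`, with placeholder resource names):

.. code-block:: python

    poller = client.cassandra_resources.begin_migrate_cassandra_table_to_autoscale(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        table_name="<table>",
    )
    settings = poller.result()
    # After the migration the autoscale settings should be populated.
    print(settings.resource.autoscale_settings.max_throughput)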
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._migrate_cassandra_table_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_migrate_cassandra_table_to_autoscale.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default/migrateToAutoscale"
}
def _migrate_cassandra_table_to_manual_throughput_initial(
self, resource_group_name: str, account_name: str, keyspace_name: str, table_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
request = build_migrate_cassandra_table_to_manual_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_cassandra_table_to_manual_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_migrate_cassandra_table_to_manual_throughput_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default/migrateToManualThroughput"
}
@distributed_trace
def begin_migrate_cassandra_table_to_manual_throughput(
self, resource_group_name: str, account_name: str, keyspace_name: str, table_name: str, **kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB Cassandra table from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param table_name: Cosmos DB table name. Required.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
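
Example (illustrative sketch; ``client`` and the resource names are placeholders,
as in the earlier examples):

.. code-block:: python

    poller = client.cassandra_resources.begin_migrate_cassandra_table_to_manual_throughput(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        table_name="<table>",
    )
    settings = poller.result()
    print(settings.resource.throughput)  # manual RU/s after the migration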
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._migrate_cassandra_table_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
table_name=table_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_migrate_cassandra_table_to_manual_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}/throughputSettings/default/migrateToManualThroughput"
}
@distributed_trace
def list_cassandra_views(
self, resource_group_name: str, account_name: str, keyspace_name: str, **kwargs: Any
) -> Iterable["_models.CassandraViewGetResults"]:
"""Lists the Cassandra materialized views under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CassandraViewGetResults or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.CassandraViewGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
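
Example (illustrative sketch; ``client`` and the resource names are placeholders):

.. code-block:: python

    for view in client.cassandra_resources.list_cassandra_views(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
    ):
        print(view.name)  # iteration transparently follows the paging next links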
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CassandraViewListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_cassandra_views_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_cassandra_views.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CassandraViewListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_cassandra_views.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views"
}
@distributed_trace
def get_cassandra_view(
self, resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, **kwargs: Any
) -> _models.CassandraViewGetResults:
"""Gets the Cassandra view under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CassandraViewGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.CassandraViewGetResults
:raises ~azure.core.exceptions.HttpResponseError:
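
Example (illustrative sketch; ``client`` and the resource names are placeholders,
and the ``view_definition`` attribute is assumed from the preview Cassandra view
models):

.. code-block:: python

    view = client.cassandra_resources.get_cassandra_view(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        view_name="<view>",
    )
    print(view.resource.view_definition)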
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CassandraViewGetResults] = kwargs.pop("cls", None)
request = build_get_cassandra_view_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_cassandra_view.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("CassandraViewGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cassandra_view.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}"
}
def _create_update_cassandra_view_initial(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
view_name: str,
create_update_cassandra_view_parameters: Union[_models.CassandraViewCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.CassandraViewGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.CassandraViewGetResults]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_cassandra_view_parameters, (IOBase, bytes)):
_content = create_update_cassandra_view_parameters
else:
_json = self._serialize.body(create_update_cassandra_view_parameters, "CassandraViewCreateUpdateParameters")
request = build_create_update_cassandra_view_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_cassandra_view_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("CassandraViewGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_update_cassandra_view_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}"
}
@overload
def begin_create_update_cassandra_view(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
view_name: str,
create_update_cassandra_view_parameters: _models.CassandraViewCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.CassandraViewGetResults]:
"""Create or update an Azure Cosmos DB Cassandra View.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:param create_update_cassandra_view_parameters: The parameters to provide for the current
Cassandra View. Required.
:type create_update_cassandra_view_parameters:
~azure.mgmt.cosmosdb.models.CassandraViewCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraViewGetResults or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraViewGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_update_cassandra_view(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
view_name: str,
create_update_cassandra_view_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.CassandraViewGetResults]:
"""Create or update an Azure Cosmos DB Cassandra View.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:param create_update_cassandra_view_parameters: The parameters to provide for the current
Cassandra View. Required.
:type create_update_cassandra_view_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraViewGetResults or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraViewGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_update_cassandra_view(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
view_name: str,
create_update_cassandra_view_parameters: Union[_models.CassandraViewCreateUpdateParameters, IO],
**kwargs: Any
) -> LROPoller[_models.CassandraViewGetResults]:
"""Create or update an Azure Cosmos DB Cassandra View.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:param create_update_cassandra_view_parameters: The parameters to provide for the current
Cassandra View. Is either a CassandraViewCreateUpdateParameters type or an IO type. Required.
:type create_update_cassandra_view_parameters:
~azure.mgmt.cosmosdb.models.CassandraViewCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CassandraViewGetResults or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.CassandraViewGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
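
Example (illustrative sketch; ``client`` and the resource names are placeholders,
and the ``CassandraViewResource`` fields are assumed from the preview models, so
check the shipped models before relying on them):

.. code-block:: python

    from azure.mgmt.cosmosdb.models import (
        CassandraViewCreateUpdateParameters,
        CassandraViewResource,
    )

    poller = client.cassandra_resources.begin_create_update_cassandra_view(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        view_name="<view>",
        create_update_cassandra_view_parameters=CassandraViewCreateUpdateParameters(
            resource=CassandraViewResource(
                id="<view>",
                view_definition="<materialized view definition>",
            )
        ),
    )
    view = poller.result()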
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CassandraViewGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_update_cassandra_view_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
create_update_cassandra_view_parameters=create_update_cassandra_view_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("CassandraViewGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_update_cassandra_view.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}"
}
def _delete_cassandra_view_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_cassandra_view_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_cassandra_view_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_cassandra_view_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}"
}
@distributed_trace
def begin_delete_cassandra_view(
self, resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Deletes an existing Azure Cosmos DB Cassandra view.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
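
Example (illustrative sketch; ``client`` and the resource names are placeholders):

.. code-block:: python

    client.cassandra_resources.begin_delete_cassandra_view(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        view_name="<view>",
    ).result()  # returns None once the deletion has finished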
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_cassandra_view_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_cassandra_view.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}"
}
@distributed_trace
def get_cassandra_view_throughput(
self, resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, **kwargs: Any
) -> _models.ThroughputSettingsGetResults:
"""Gets the RUs per second of the Cassandra view under an existing Azure Cosmos DB database
account with the provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises ~azure.core.exceptions.HttpResponseError:
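
Example (illustrative sketch; ``client`` and the resource names are placeholders):

.. code-block:: python

    settings = client.cassandra_resources.get_cassandra_view_throughput(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        view_name="<view>",
    )
    # Exactly one of these is populated, depending on the provisioning mode.
    print(settings.resource.throughput, settings.resource.autoscale_settings)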
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
request = build_get_cassandra_view_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_cassandra_view_throughput.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cassandra_view_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default"
}
def _update_cassandra_view_throughput_initial(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
view_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(update_throughput_parameters, (IOBase, bytes)):
_content = update_throughput_parameters
else:
_json = self._serialize.body(update_throughput_parameters, "ThroughputSettingsUpdateParameters")
request = build_update_cassandra_view_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_cassandra_view_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_update_cassandra_view_throughput_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default"
}
@overload
def begin_update_cassandra_view_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
view_name: str,
update_throughput_parameters: _models.ThroughputSettingsUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra view.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra view. Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update_cassandra_view_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
view_name: str,
update_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra view.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra view. Required.
:type update_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update_cassandra_view_throughput(
self,
resource_group_name: str,
account_name: str,
keyspace_name: str,
view_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB Cassandra view.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Cassandra view. Is either a ThroughputSettingsUpdateParameters type or an IO type.
Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
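
Example (illustrative sketch; ``client`` and the resource names are placeholders,
and the model field names are assumed from ``azure.mgmt.cosmosdb.models``):

.. code-block:: python

    from azure.mgmt.cosmosdb.models import (
        AutoscaleSettingsResource,
        ThroughputSettingsResource,
        ThroughputSettingsUpdateParameters,
    )

    poller = client.cassandra_resources.begin_update_cassandra_view_throughput(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        view_name="<view>",
        update_throughput_parameters=ThroughputSettingsUpdateParameters(
            resource=ThroughputSettingsResource(
                autoscale_settings=AutoscaleSettingsResource(max_throughput=4000)
            )
        ),
    )
    settings = poller.result()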
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_cassandra_view_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
update_throughput_parameters=update_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update_cassandra_view_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default"
}
def _migrate_cassandra_view_to_autoscale_initial(
self, resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
request = build_migrate_cassandra_view_to_autoscale_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_cassandra_view_to_autoscale_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_migrate_cassandra_view_to_autoscale_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default/migrateToAutoscale"
}
@distributed_trace
def begin_migrate_cassandra_view_to_autoscale(
self, resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, **kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB Cassandra view from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
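
Example (illustrative sketch; ``client`` and the resource names are placeholders):

.. code-block:: python

    poller = client.cassandra_resources.begin_migrate_cassandra_view_to_autoscale(
        resource_group_name="<resource-group>",
        account_name="<account>",
        keyspace_name="<keyspace>",
        view_name="<view>",
        polling_interval=10,  # optional: poll every 10 seconds instead of the default
    )
    settings = poller.result()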
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._migrate_cassandra_view_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_migrate_cassandra_view_to_autoscale.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default/migrateToAutoscale"
}
def _migrate_cassandra_view_to_manual_throughput_initial(
self, resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[Optional[_models.ThroughputSettingsGetResults]] = kwargs.pop("cls", None)
request = build_migrate_cassandra_view_to_manual_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_cassandra_view_to_manual_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if response.status_code == 202:
response_headers["azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("azure-AsyncOperation")
)
response_headers["location"] = self._deserialize("str", response.headers.get("location"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_migrate_cassandra_view_to_manual_throughput_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default/migrateToManualThroughput"
}
@distributed_trace
def begin_migrate_cassandra_view_to_manual_throughput(
self, resource_group_name: str, account_name: str, keyspace_name: str, view_name: str, **kwargs: Any
) -> LROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB Cassandra view from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param keyspace_name: Cosmos DB keyspace name. Required.
:type keyspace_name: str
:param view_name: Cosmos DB view name. Required.
:type view_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ThroughputSettingsGetResults] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._migrate_cassandra_view_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
keyspace_name=keyspace_name,
view_name=view_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_migrate_cassandra_view_to_manual_throughput.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/views/{viewName}/throughputSettings/default/migrateToManualThroughput"
}
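# --- Hedged usage sketch (not part of the generated SDK module) ---
# A minimal illustration of how the long-running migration operations defined above
# might be driven from client code. The management client class, the operations
# group attribute, and every identifier below are assumptions for illustration only.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.cosmosdb import CosmosDBManagementClient
#
#     client = CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.cassandra_resources.begin_migrate_cassandra_view_to_manual_throughput(
#         resource_group_name="<resource-group>",
#         account_name="<account>",
#         keyspace_name="<keyspace>",
#         view_name="<view>",
#     )
#     throughput_settings = poller.result()  # blocks until the LRO finishes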
|
40e7534ba2433f4959a544c373a2a4e32dcc7cf1
|
1664bc3e55c0e006c8bbf8671a2ba0043dc0203c
|
/mpf/devices/multiball.py
|
74d3b6ccf2851b1bbe797f00843d2fe3d0346375
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
missionpinball/mpf
|
d426b0b1b865a138f169aaf852741f39a880edf2
|
9f90c8b1586363b65340017bfa3af5d56d32c6d9
|
refs/heads/dev
| 2023-07-26T21:31:11.581205
| 2023-07-15T17:06:04
| 2023-07-15T17:06:04
| 21,267,545
| 191
| 173
|
MIT
| 2023-09-14T06:07:45
| 2014-06-27T07:26:26
|
Python
|
UTF-8
|
Python
| false
| false
| 13,771
|
py
|
multiball.py
|
"""Contains the MultiBall device class."""
from mpf.core.enable_disable_mixin import EnableDisableMixin
from mpf.core.delays import DelayManager
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.events import event_handler
from mpf.core.mode_device import ModeDevice
from mpf.core.placeholder_manager import NativeTypeTemplate
from mpf.core.system_wide_device import SystemWideDevice
@DeviceMonitor("shoot_again", "grace_period", "hurry_up", "balls_added_live", "balls_live_target")
class Multiball(EnableDisableMixin, SystemWideDevice, ModeDevice):
"""Multiball device for MPF."""
config_section = 'multiballs'
collection = 'multiballs'
class_label = 'multiball'
__slots__ = ["ball_locks", "source_playfield", "delay", "balls_added_live", "balls_live_target", "shoot_again",
"grace_period", "hurry_up"]
def __init__(self, machine, name):
"""Initialise multiball."""
self.ball_locks = None
self.source_playfield = None
super().__init__(machine, name)
self.delay = DelayManager(machine)
self.balls_added_live = 0
self.balls_live_target = 0
self.shoot_again = False
self.grace_period = False
self.hurry_up = False
@property
def can_exist_outside_of_game(self):
"""Return true if this device can exist outside of a game."""
return True
def device_removed_from_mode(self, mode):
"""Disable and stop mb when mode stops."""
super().device_removed_from_mode(mode)
# also stop mb if shoot again is specified (aka the MB is currently running)
if self.shoot_again:
self.stop()
async def _initialize(self):
await super()._initialize()
self.ball_locks = self.config['ball_locks']
self.source_playfield = self.config['source_playfield']
if isinstance(self.config['ball_count'], NativeTypeTemplate):
ball_count = self.config['ball_count'].evaluate([])
else:
ball_count = None
if self.config['ball_count_type'] == "total" and ball_count is not None and \
ball_count <= 1:
self.raise_config_error("ball_count should be at least 2 for a multiball to have an effect when "
"ball_count_type is set to total.", 1)
elif self.config['ball_count_type'] == "add" and ball_count is not None and \
ball_count <= 0:
self.raise_config_error("ball_count should be at least 1 for a multiball to have an effect when "
"ball_count_type is set to add.", 2)
@classmethod
def prepare_config(cls, config, is_mode_config):
"""Add default enable_events and disable_events outside mode."""
if not is_mode_config:
if 'enable_events' not in config:
config['enable_events'] = 'ball_started'
if 'disable_events' not in config:
config['disable_events'] = 'ball_will_end'
return super().prepare_config(config, is_mode_config)
def _handle_balls_in_play_and_balls_live(self):
ball_count = self.config['ball_count'].evaluate([])
balls_to_replace = self.machine.game.balls_in_play if self.config['replace_balls_in_play'] else 0
self.debug_log("Going to add an additional {} balls for replace_balls_in_play".format(balls_to_replace))
if self.config['ball_count_type'] == "total":
# policy: total balls
if ball_count > self.machine.game.balls_in_play:
self.balls_added_live = ball_count - self.machine.game.balls_in_play
self.machine.game.balls_in_play = ball_count
self.balls_live_target = ball_count
else:
# policy: add balls
self.balls_added_live = ball_count
self.machine.game.balls_in_play += self.balls_added_live
self.balls_live_target = self.machine.game.balls_in_play
self.balls_added_live += balls_to_replace
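    # Worked example (values assumed, for illustration only): with 2 balls already
    # in play and ball_count evaluating to 3:
    #   - ball_count_type 'total': balls_added_live = 3 - 2 = 1, balls_in_play
    #     becomes 3, balls_live_target = 3.
    #   - ball_count_type 'add':   balls_added_live = 3, balls_in_play becomes
    #     2 + 3 = 5, balls_live_target = 5.
    # With replace_balls_in_play enabled, balls_added_live is further increased by
    # the number of balls that were in play when the multiball started.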
@event_handler(10)
def event_start(self, **kwargs):
"""Event handler for start event."""
del kwargs
self.start()
def start(self):
"""Start multiball."""
if not self.enabled:
return
if self.balls_live_target > 0:
self.debug_log("Cannot start MB because %s are still in play",
self.balls_live_target)
return
self.shoot_again = True
self._handle_balls_in_play_and_balls_live()
self.debug_log("Starting multiball with %s balls (added %s)", self.balls_live_target, self.balls_added_live)
balls_added = 0
# eject balls from locks
for device in self.ball_locks:
balls_to_release = max(min(device.available_balls, self.balls_added_live - balls_added), 0)
self.source_playfield.add_ball(balls=balls_to_release, source_device=device)
balls_added += balls_to_release
# request remaining balls
if self.balls_added_live - balls_added > 0:
self.source_playfield.add_ball(balls=self.balls_added_live - balls_added)
shoot_again_ms = self.config['shoot_again'].evaluate([])
if not shoot_again_ms:
# No shoot again. Just stop multiball right away
self.stop()
else:
# Enable shoot again
self.machine.events.add_handler('ball_drain',
self._ball_drain_shoot_again,
priority=1000)
self._timer_start()
self.machine.events.post("multiball_" + self.name + "_started",
balls=self.balls_live_target)
'''event: multiball_(name)_started
desc: The multiball called (name) has just started.
args:
balls: The number of balls in this multiball
'''
def _timer_start(self) -> None:
"""Start the timer.
This is started when multiball starts if configured.
"""
self.machine.events.post('ball_save_{}_timer_start'.format(self.name))
'''event: ball_save_(name)_timer_start
        desc: The multiball ball save called (name) has just started its countdown timer.
'''
shoot_again_ms = self.config['shoot_again'].evaluate([])
grace_period_ms = self.config['grace_period'].evaluate([])
hurry_up_time_ms = self.config['hurry_up_time'].evaluate([])
self._start_shoot_again(shoot_again_ms, grace_period_ms, hurry_up_time_ms)
def _start_shoot_again(self, shoot_again_ms, grace_period_ms, hurry_up_time_ms):
"""Set callbacks for shoot again, grace period, and hurry up, if values above 0 are provided.
        This is used both for the initial multiball ball save and for the add a ball ball save.
"""
if shoot_again_ms > 0:
self.debug_log('Starting ball save timer: %ss',
shoot_again_ms)
self.delay.add(name='disable_shoot_again',
ms=(shoot_again_ms +
grace_period_ms),
callback=self.stop)
if grace_period_ms > 0:
self.grace_period = True
self.delay.add(name='grace_period',
ms=shoot_again_ms,
callback=self._grace_period)
if hurry_up_time_ms > 0:
self.hurry_up = True
self.delay.add(name='hurry_up',
ms=(shoot_again_ms -
hurry_up_time_ms),
callback=self._hurry_up)
def _hurry_up(self) -> None:
self.debug_log("Starting Hurry Up")
self.hurry_up = False
self.machine.events.post('multiball_{}_hurry_up'.format(self.name))
'''event: multiball_(name)_hurry_up
desc: The multiball ball save called (name) has just entered its hurry up mode.
'''
def _grace_period(self) -> None:
self.debug_log("Starting Grace Period")
self.grace_period = False
self.machine.events.post('multiball_{}_grace_period'.format(self.name))
'''event: multiball_(name)_grace_period
desc: The multiball ball save called (name) has just entered its grace period
time.
'''
def _ball_drain_shoot_again(self, balls, **kwargs):
del kwargs
balls_to_safe = self.balls_live_target - self.machine.game.balls_in_play + balls
if balls_to_safe <= 0:
return {'balls': balls}
if balls_to_safe > balls:
balls_to_safe = balls
self.machine.events.post("multiball_" + self.name + "_shoot_again", balls=balls_to_safe)
'''event: multiball_(name)_shoot_again
desc: A ball has drained during the multiball called (name) while the
ball save timer for that multiball was running, so a ball (or balls)
will be saved and re-added into play.
args:
balls: The number of balls that are being saved.
'''
self.debug_log("Ball drained during MB. Requesting a new one")
self.source_playfield.add_ball(balls=balls_to_safe)
return {'balls': balls - balls_to_safe}
def _ball_drain_count_balls(self, balls, **kwargs):
del kwargs
self.machine.events.post("multiball_{}_ball_lost".format(self.name))
        '''event: multiball_(name)_ball_lost
desc: The multiball called (name) has lost a ball after ball save expired.
'''
if not self.machine.game or self.machine.game.balls_in_play - balls < 1:
self.balls_added_live = 0
self.balls_live_target = 0
self.machine.events.remove_handler(self._ball_drain_count_balls)
self.machine.events.post("multiball_{}_ended".format(self.name))
'''event: multiball_(name)_ended
desc: The multiball called (name) has just ended.
'''
self.debug_log("Ball drained. MB ended.")
@event_handler(5)
def event_stop(self, **kwargs):
"""Event handler for stop event."""
del kwargs
self.stop()
def stop(self):
"""Stop shoot again."""
self.debug_log("Stopping shoot again of multiball")
self.shoot_again = False
# disable shoot again
self.machine.events.remove_handler(self._ball_drain_shoot_again)
if self.grace_period:
self.machine.events.remove_handler(self._grace_period)
self._grace_period()
if self.hurry_up:
self.machine.events.remove_handler(self._hurry_up)
self._hurry_up()
self.machine.events.post("multiball_" + self.name + "_shoot_again_ended")
'''event: multiball_(name)_shoot_again_ended
desc: Shoot again for multiball (name) has ended.
'''
        # add handler for ball_drain to count the remaining live balls until the multiball ends
self.machine.events.remove_handler(self._ball_drain_count_balls)
self.machine.events.add_handler('ball_drain', self._ball_drain_count_balls)
@event_handler(8)
def event_add_a_ball(self, **kwargs):
"""Event handler for add_a_ball event."""
del kwargs
self.add_a_ball()
def add_a_ball(self):
"""Add a ball if multiball has started."""
if self.balls_live_target > 0:
self.debug_log("Adding a ball.")
self.balls_live_target += 1
self.balls_added_live += 1
self.machine.game.balls_in_play += 1
self.source_playfield.add_ball(balls=1)
self._add_a_ball_timer_start()
def _add_a_ball_timer_start(self) -> None:
"""Start the timer for add a ball ball save.
        This is started when add a ball is triggered, if configured,
        and the main ball save timer is not already running.
"""
if self.shoot_again:
# if main ball save timer is running, don't run this timer
return
self.shoot_again = True
shoot_again_ms = self.config['add_a_ball_shoot_again'].evaluate([])
if not shoot_again_ms:
# No shoot again. Just stop multiball right away
self.stop()
return
# Enable shoot again
self.machine.events.add_handler('ball_drain',
self._ball_drain_shoot_again,
priority=1000)
self.machine.events.post('ball_save_{}_add_a_ball_timer_start'.format(self.name))
'''event: ball_save_(name)_add_a_ball_timer_start
        desc: The multiball add a ball ball save called (name) has just started its countdown timer.
'''
grace_period_ms = self.config['add_a_ball_grace_period'].evaluate([])
hurry_up_time_ms = self.config['add_a_ball_hurry_up_time'].evaluate([])
self._start_shoot_again(shoot_again_ms, grace_period_ms, hurry_up_time_ms)
@event_handler(9)
def event_start_or_add_a_ball(self, **kwargs):
"""Event handler for start_or_add_a_ball event."""
del kwargs
self.start_or_add_a_ball()
def start_or_add_a_ball(self):
"""Start multiball or add a ball if multiball has started."""
if self.balls_live_target > 0:
self.add_a_ball()
else:
self.start()
@event_handler(2)
def event_reset(self, **kwargs):
"""Event handler for reset event."""
del kwargs
self.reset()
def reset(self):
"""Reset the multiball and disable it."""
self.disable()
self.shoot_again = False
self.balls_added_live = 0
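# --- Hedged configuration sketch (illustration only, not part of the module) ---
# A minimal example of how a multiball using the options referenced above might be
# configured in an MPF machine config. The device name, ball device name, event name
# and all values are assumptions; 'start_events' is inferred from the event_start
# handler above rather than taken from a verified config reference.
#
#     multiballs:
#       my_multiball:
#         ball_count: 3
#         ball_count_type: total
#         shoot_again: 10s
#         grace_period: 2s
#         hurry_up_time: 3s
#         ball_locks: bd_lock
#         start_events: start_my_multiball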
|
20216ab628298680586bd2c0eef462bb294e21f4
|
16ecadb9988cb16a7c55ce586b7f5155c6f1997c
|
/pyrpl/software_modules/lockbox/models/__init__.py
|
314ffde374b9e2fe6bf8e897a3ed4935260a193b
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lneuhaus/pyrpl
|
00bd04e714a9f934e5fdcd63e9c246abbef86a57
|
8e2f39d1ad2f3351ef31009e810cf9ab6a750693
|
refs/heads/main
| 2023-08-08T16:22:50.453213
| 2023-08-01T06:46:19
| 2023-08-01T06:46:19
| 58,811,185
| 120
| 104
|
MIT
| 2023-08-09T16:49:18
| 2016-05-14T14:09:14
|
Python
|
UTF-8
|
Python
| false
| false
| 816
|
py
|
__init__.py
|
from .interferometer import *
from .fabryperot import *
from .linear import *
from .custom_lockbox_example import *
from .pll import *
# try to import user models if applicable
import logging
import sys, os
from .... import user_lockbox_dir
logger = logging.getLogger(__name__)
sys.path.append(user_lockbox_dir)
usermodels = []
module = None
try:
for module in os.listdir(user_lockbox_dir):
if module == '__init__.py' or module[-3:] != '.py':
continue
usermodels.append(__import__(module[:-3], locals(), globals(), [], 0))
logger.debug("Custom user models from %s were successfully imported!"%module)
del module
except Exception:
    logger.warning("An error occurred during the import of user model files! "
                   "The exception occurred during the import of module '%s'.",
                   module)
raise
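# --- Hedged example of a user model file (illustration only) ---
# Any '*.py' file placed in user_lockbox_dir is picked up by the import loop above,
# so a custom model could be sketched roughly as below. The base class import path
# and the names are assumptions modelled on the bundled example models, not a
# verified API.
#
#     # my_custom_model.py, saved inside user_lockbox_dir
#     from pyrpl.software_modules.lockbox import Lockbox
#
#     class MyCustomLockbox(Lockbox):
#         """A user-defined lockbox model imported automatically with the others."""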
|
0012d710762f7d740a5b9ad4df79495ab76f5b46
|
f26a731da67963adc1152318053f148ec24752fc
|
/Yank/tests/test_experiment.py
|
840737aefa0832ecc1e22853d2306735ca877925
|
[
"MIT"
] |
permissive
|
choderalab/yank
|
7fa580ac7f7a69ddd20bc6431375d73dd0ae5dc9
|
c06059045bcf86d610f2e39c6db3944994b9f392
|
refs/heads/master
| 2023-07-27T09:56:42.719221
| 2022-07-20T22:51:16
| 2022-07-20T22:51:16
| 13,779,937
| 167
| 85
|
MIT
| 2023-08-25T23:57:49
| 2013-10-22T17:01:36
|
Python
|
UTF-8
|
Python
| false
| false
| 149,311
|
py
|
test_experiment.py
|
#!/usr/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Test YAML functions.
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import itertools
from pprint import pformat
import shutil
import tempfile
import textwrap
import time
import typing
import unittest
import mdtraj
from nose.plugins.attrib import attr
from nose.tools import assert_raises, assert_equal, assert_raises_regexp
from yank.experiment import *
# silence the citations at a global level
mmtools.multistate.MultiStateSampler._global_citation_silence = True
# ==============================================================================
# Subroutines for testing
# ==============================================================================
standard_protocol = """
absolute-binding:
complex:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]
solvent:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]"""
def indent(input_string):
"""Put 4 extra spaces in front of every line."""
return '\n '.join(input_string.split('\n'))
def examples_paths():
"""Return the absolute path to the Yank examples relevant to tests."""
data_dir = utils.get_data_filename(os.path.join('tests', 'data'))
p_xylene_dir = os.path.join(data_dir, 'p-xylene-implicit')
p_xylene_gro_dir = os.path.join(data_dir, 'p-xylene-gromacs-example')
ben_tol_dir = os.path.join(data_dir, 'benzene-toluene-explicit')
abl_imatinib_dir = os.path.join(data_dir, 'abl-imatinib-explicit')
tol_dir = os.path.join(data_dir, 'toluene-explicit')
benz_tol_dir = os.path.join(data_dir, 'benzene-toluene-standard-state')
paths = dict()
paths['lysozyme'] = os.path.join(p_xylene_dir, '181L-pdbfixer.pdb')
paths['p-xylene'] = os.path.join(p_xylene_dir, 'p-xylene.mol2')
paths['benzene'] = os.path.join(ben_tol_dir, 'benzene.tripos.mol2')
paths['toluene'] = os.path.join(ben_tol_dir, 'toluene.tripos.mol2')
paths['abl'] = os.path.join(abl_imatinib_dir, '2HYY-pdbfixer.pdb')
paths['imatinib'] = os.path.join(abl_imatinib_dir, 'STI02.mol2')
paths['bentol-complex'] = [os.path.join(ben_tol_dir, 'complex.prmtop'),
os.path.join(ben_tol_dir, 'complex.inpcrd')]
paths['bentol-solvent'] = [os.path.join(ben_tol_dir, 'solvent.prmtop'),
os.path.join(ben_tol_dir, 'solvent.inpcrd')]
paths['pxylene-complex'] = [os.path.join(p_xylene_gro_dir, 'complex.top'),
os.path.join(p_xylene_gro_dir, 'complex.gro')]
paths['pxylene-solvent'] = [os.path.join(p_xylene_gro_dir, 'solvent.top'),
os.path.join(p_xylene_gro_dir, 'solvent.gro')]
paths['pxylene-gro-include'] = os.path.join(p_xylene_gro_dir, 'top')
paths['toluene-solvent'] = [os.path.join(tol_dir, 'solvent.pdb'),
os.path.join(tol_dir, 'solvent.xml')]
paths['toluene-vacuum'] = [os.path.join(tol_dir, 'vacuum.pdb'),
os.path.join(tol_dir, 'vacuum.xml')]
paths['benzene-toluene-boxless'] = [os.path.join(benz_tol_dir, 'standard_state_complex_boxless.inpcrd'),
os.path.join(benz_tol_dir, 'standard_state_complex.prmtop')]
paths['benzene-toluene-nan'] = [os.path.join(benz_tol_dir, 'standard_state_complex_nan.inpcrd'),
os.path.join(benz_tol_dir, 'standard_state_complex.prmtop')]
return paths
def yank_load(script):
"""Shortcut to load a string YAML script with YankLoader."""
return yaml.load(textwrap.dedent(script), Loader=YankLoader)
def get_template_script(output_dir='.', keep_schrodinger=False, keep_openeye=False,
systems='all'):
"""Return a YAML template script as a dict.
Parameters
----------
output_dir : str, optional
The YANK output directory to set in the YAML options.
keep_schrodinger : bool, optional
If False, removes the molecules that depend on the Schrodinger
toolkit. Default is False.
keep_openeye : bool, optional
If False, removes the molecules that depend on the OpenEye
toolkit. Default is False.
systems : List[str], optional
Limits the systems in the YAML to those identified by the given
IDs. If 'all', all systems are included in the script, which means
that the setup pipeline will build them all.
"""
paths = examples_paths()
template_script = """
---
options:
output_dir: {output_dir}
default_number_of_iterations: 0
temperature: 300*kelvin
pressure: 1*atmosphere
minimize: no
verbose: no
default_nsteps_per_iteration: 1
molecules:
benzene:
filepath: {benzene_path}
antechamber: {{charge_method: bcc}}
benzene-epik0:
filepath: {benzene_path}
epik:
select: 0
antechamber: {{charge_method: bcc}}
benzene-epikcustom:
filepath: {benzene_path}
epik:
select: 0
ph: 7.0
tautomerize: yes
antechamber: {{charge_method: bcc}}
p-xylene:
filepath: {pxylene_path}
antechamber: {{charge_method: bcc}}
p-xylene-name:
name: p-xylene
openeye: {{quacpac: am1-bcc}}
antechamber: {{charge_method: null}}
toluene:
filepath: {toluene_path}
antechamber: {{charge_method: bcc}}
toluene-smiles:
smiles: Cc1ccccc1
antechamber: {{charge_method: bcc}}
toluene-name:
name: toluene
antechamber: {{charge_method: bcc}}
Abl:
filepath: {abl_path}
T4Lysozyme:
filepath: {lysozyme_path}
solvents:
vacuum:
nonbonded_method: NoCutoff
GBSA-OBC2:
nonbonded_method: NoCutoff
implicit_solvent: OBC2
PME:
nonbonded_method: PME
nonbonded_cutoff: 1*nanometer
clearance: 10*angstroms
positive_ion: Na+
negative_ion: Cl-
leap:
parameters: [leaprc.water.tip4pew]
systems:
explicit-system:
receptor: benzene
ligand: toluene
solvent: PME
leap:
parameters: [leaprc.protein.ff14SB, leaprc.gaff]
implicit-system:
receptor: T4Lysozyme
ligand: p-xylene
solvent: GBSA-OBC2
leap:
parameters: [leaprc.protein.ff14SB, leaprc.gaff]
hydration-system:
solute: toluene
solvent1: PME
solvent2: vacuum
leap:
parameters: [leaprc.protein.ff14SB, leaprc.gaff]
mcmc_moves:
single:
type: LangevinSplittingDynamicsMove
sequence:
type: SequenceMove
move_list:
- type: MCDisplacementMove
- type: LangevinDynamicsMove
samplers:
repex:
type: ReplicaExchangeSampler
sams:
type: SAMSSampler
protocols:
absolute-binding:
complex:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]
solvent:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]
hydration-protocol:
solvent1:
alchemical_path:
lambda_electrostatics: [1.0, 0.0]
lambda_sterics: [1.0, 0.0]
solvent2:
alchemical_path:
lambda_electrostatics: [1.0, 0.0]
lambda_sterics: [1.0, 1.0]
experiments:
system: explicit-system
protocol: absolute-binding
""".format(output_dir=output_dir, benzene_path=paths['benzene'],
pxylene_path=paths['p-xylene'], toluene_path=paths['toluene'],
abl_path=paths['abl'], lysozyme_path=paths['lysozyme'])
# Load script as dictionary.
script_dict = yank_load(template_script)
# Find all molecules that require optional tools.
molecules_to_remove = []
for molecule_id, molecule_description in script_dict['molecules'].items():
need_schrodinger = 'epik' in molecule_description
need_openeye = any([k in molecule_description for k in ['name', 'smiles', 'openeye']])
if ((need_schrodinger and not keep_schrodinger) or
(need_openeye and not keep_openeye)):
molecules_to_remove.append(molecule_id)
# Remove molecules.
for molecule_id in molecules_to_remove:
del script_dict['molecules'][molecule_id]
# Remove systems.
if systems != 'all':
systems_to_remove = [s for s in script_dict['systems'] if s not in systems]
for system_id in systems_to_remove:
del script_dict['systems'][system_id]
return script_dict
def get_functionality_script(output_directory='.', number_of_iter=0, experiment_repeats=1, number_nan_repeats=0):
"""
    A computationally simple, pre-set-up system which can be loaded to manipulate a formal experiment.
    It should not be used for scientific testing per se, but it can be used to test the functional
    components of an experiment.
    Parameters
    ==========
    output_directory : str, Optional
        Output directory to set in the script
    number_of_iter : int, Optional, Default: 0
        Number of iterations to run
    experiment_repeats : int, Optional, Default: 1
        Number of times the experiment is repeated in an "experiments" header
    number_nan_repeats : int, Optional, Default: 0
        Number of times the experiment with a NaN is repeated; these are added to the end of the stack
"""
paths = examples_paths()
template_script = """
---
options:
minimize: no
verbose: no
output_dir: {output_directory}
default_number_of_iterations: {number_of_iter}
default_nsteps_per_iteration: 10
temperature: 300*kelvin
pressure: null
anisotropic_dispersion_cutoff: null
solvents:
vacuum:
nonbonded_method: NoCutoff
systems:
premade:
phase1_path: {boxless_path}
phase2_path: {boxless_path}
ligand_dsl: resname ene
solvent: vacuum
premade_nan:
phase1_path: {nan_path}
phase2_path: {nan_path}
ligand_dsl: resname ene
solvent: vacuum
protocols:
absolute-binding:
complex:
alchemical_path:
lambda_electrostatics: [0.0, 0.0]
lambda_sterics: [0.0, 0.0]
solvent:
alchemical_path:
lambda_electrostatics: [1.0, 1.0]
lambda_sterics: [1.0, 1.0]
the_exp:
system: premade
protocol: absolute-binding
restraint:
type: FlatBottom
the_nan_exp:
system: premade_nan
protocol: absolute-binding
restraint:
type: FlatBottom
experiments: [{repeating}]
"""
repeating_string = ', '.join(['the_exp'] * experiment_repeats)
repeating_nan_string = ', '.join(['the_nan_exp'] * number_nan_repeats)
if repeating_string != '':
repeating_string += ', '
repeating_string += repeating_nan_string
return yank_load(template_script.format(output_directory=output_directory,
number_of_iter=number_of_iter,
repeating=repeating_string,
boxless_path=paths['benzene-toluene-boxless'],
nan_path=paths['benzene-toluene-nan']))
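# Hedged usage sketch (illustration only): the dictionaries returned by the helpers
# above are normally handed straight to ExperimentBuilder, mirroring what the tests
# below do, e.g.
#
#     with mmtools.utils.temporary_directory() as tmp_dir:
#         script = get_functionality_script(output_directory=tmp_dir, number_of_iter=1)
#         exp_builder = ExperimentBuilder(script)
#         experiments = list(exp_builder.build_experiments())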
# ==============================================================================
# YAML parsing and validation
# ==============================================================================
def test_yaml_parsing():
"""Check that YAML file is parsed correctly."""
# Parser handles no options
yaml_content = """
---
test: 2
"""
exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))
# The plus 1 is because we overwrite disable_alchemical_dispersion_correction.
expected_n_options = (len(exp_builder.GENERAL_DEFAULT_OPTIONS) +
len(exp_builder.EXPERIMENT_DEFAULT_OPTIONS) + 1)
assert len(exp_builder._options) == expected_n_options
# Correct parsing
yaml_content = """
---
options:
verbose: true
resume_setup: true
resume_simulation: true
output_dir: /path/to/output/
setup_dir: /path/to/output/setup/
experiments_dir: /path/to/output/experiments/
platform: CPU
precision: mixed
switch_experiment_interval: -2.0
processes_per_experiment: 2
max_n_contexts: 9
switch_phase_interval: 32
temperature: 300*kelvin
pressure: null
constraints: AllBonds
hydrogen_mass: 2*amus
randomize_ligand: yes
randomize_ligand_sigma_multiplier: 1.0e-2
randomize_ligand_close_cutoff: 1.5 * angstrom
anisotropic_dispersion_cutoff: null
default_timestep: 2.0 * femtosecond
default_nsteps_per_iteration: 2500
default_number_of_iterations: .inf
equilibration_timestep: 1.0 * femtosecond
number_of_equilibration_iterations: 100
minimize: False
minimize_tolerance: 1.0 * kilojoules_per_mole / nanometers
minimize_max_iterations: 0
annihilate_sterics: no
annihilate_electrostatics: true
alchemical_pme_treatment: direct-space
disable_alchemical_dispersion_correction: no
"""
exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))
assert len(exp_builder._options) == 33
# The global context cache has been set.
assert mmtools.cache.global_context_cache.capacity == 9
# Check correct types
assert exp_builder._options['output_dir'] == '/path/to/output/'
assert exp_builder._options['pressure'] is None
assert exp_builder._options['constraints'] == openmm.app.AllBonds
assert exp_builder._options['anisotropic_dispersion_cutoff'] is None
assert exp_builder._options['default_timestep'] == 2.0 * unit.femtoseconds
assert exp_builder._options['randomize_ligand_sigma_multiplier'] == 1.0e-2
assert exp_builder._options['default_nsteps_per_iteration'] == 2500
assert type(exp_builder._options['default_nsteps_per_iteration']) is int
assert exp_builder._options['default_number_of_iterations'] == float('inf')
assert exp_builder._options['number_of_equilibration_iterations'] == 100
assert type(exp_builder._options['number_of_equilibration_iterations']) is int
assert exp_builder._options['minimize'] is False
def test_paths_properties():
"""Test that setup directory is updated correctly when changing output paths."""
template_script = get_template_script(output_dir='output1')
template_script['options']['setup_dir'] = 'setup1'
exp_builder = ExperimentBuilder(template_script)
# The database path is configured correctly.
assert exp_builder._db.setup_dir == os.path.join('output1', 'setup1')
# Updating paths also updates the database main directory.
exp_builder.output_dir = 'output2'
exp_builder.setup_dir = 'setup2'
assert exp_builder._db.setup_dir == os.path.join('output2', 'setup2')
def test_online_reads_checkpoint():
"""Test that online analysis reads the checkpoint correctly in all cases"""
current_log_level = logger.level
logger.setLevel(logging.ERROR) # Temporarily suppress some of the logging output
raw_template_script = get_template_script(systems=['explicit-system'])
    # Pare down the processing
allowed_molecules = [raw_template_script['systems']['explicit-system']['receptor'],
raw_template_script['systems']['explicit-system']['ligand']]
popables = []
for molecule in raw_template_script['molecules'].keys():
if molecule not in allowed_molecules:
popables.append(molecule)
for popable in popables:
raw_template_script['molecules'].pop(popable)
raw_template_script.pop('samplers')
sampler_entry = {'type': 'SAMSSampler'}
sampler = {'samplers': {'sams': sampler_entry}}
base_template_script = {**raw_template_script, **sampler}
base_template_script['experiments']['sampler'] = 'sams'
def spinup_sampler(script):
with mmtools.utils.temporary_directory() as tmp_dir:
template_script['options']['output_dir'] = tmp_dir
exp_builder = ExperimentBuilder(script)
experiment = [ex for ex in exp_builder.build_experiments()][0]
sampler = experiment.phases[0].sampler
return sampler
    # Testing note: all the checkpoint_interval test values below differ from the default and from
    # each other, so that changing the setting demonstrably has the intended effect and no settings
    # are accidentally carried over between the individual tests.
# Test that setting "checkpoint" for online analysis gets the checkpoint interval default
template_script = copy.deepcopy(base_template_script)
template_script['options'].pop("checkpoint_interval", None)
template_script['samplers']['sams']['online_analysis_interval'] = "checkpoint"
sampler = spinup_sampler(template_script)
assert sampler.online_analysis_interval == AlchemicalPhaseFactory.DEFAULT_OPTIONS['checkpoint_interval']
# Test that setting "checkpoint" for online analysis gets the checkpoint interval that is set
template_script['options']["checkpoint_interval"] = 10
sampler = spinup_sampler(template_script)
assert sampler.online_analysis_interval == 10
# Test that not setting online analysis gets the checkpoint interval default
template_script = copy.deepcopy(base_template_script)
template_script['options'].pop("checkpoint_interval", None)
template_script['samplers']['sams'].pop('online_analysis_interval', None)
sampler = spinup_sampler(template_script)
assert sampler.online_analysis_interval == AlchemicalPhaseFactory.DEFAULT_OPTIONS['checkpoint_interval']
# Test that not setting online analysis gets the checkpoint interval that is set
template_script['options']["checkpoint_interval"] = 100
sampler = spinup_sampler(template_script)
assert sampler.online_analysis_interval == 100
# Test that setting online analysis still returns the set value
template_script = copy.deepcopy(base_template_script)
template_script['options']["checkpoint_interval"] = 70
template_script['samplers']['sams']['online_analysis_interval'] = 13
sampler = spinup_sampler(template_script)
assert sampler.online_analysis_interval == 13
# Test that setting online analysis to None keeps online analysis None
template_script = copy.deepcopy(base_template_script)
template_script['options']["checkpoint_interval"] = 80
template_script['samplers']['sams']['online_analysis_interval'] = None
sampler = spinup_sampler(template_script)
assert sampler.online_analysis_interval is None
    # Test that not setting a sampler gets the checkpoint interval for online analysis
template_script = copy.deepcopy(base_template_script)
template_script['options']["checkpoint_interval"] = 90
template_script.pop('samplers', None)
template_script['experiments'].pop('sampler', None)
sampler = spinup_sampler(template_script)
assert sampler.online_analysis_interval == 90
# Test that setting the checkpoint_interval in *experiments:options* block correctly sets the checkpoint interval
template_script = copy.deepcopy(base_template_script)
template_script['options']["checkpoint_interval"] = 110
template_script.pop('samplers', None)
opts = {'checkpoint_interval': 120}
template_script['experiments'].pop('sampler', None)
template_script['experiments']['options'] = opts
sampler = spinup_sampler(template_script)
assert sampler.online_analysis_interval == 120
logger.setLevel(current_log_level) # Reset logging to normal
def test_processes_per_experiment():
"""Test the determination of processes_per_experiment option."""
# Create a script with 4 experiments.
template_script = get_template_script()
template_script['experiment1'] = copy.deepcopy(template_script['experiments'])
template_script['experiment1']['system'] = utils.CombinatorialLeaf(['explicit-system', 'implicit-system'])
    # The first two experiments have fewer states than the other two.
template_script['experiment1']['protocol'] = 'hydration-protocol'
template_script['experiment2'] = copy.deepcopy(template_script['experiments'])
template_script['experiment2']['system'] = 'hydration-system'
# The last experiment uses SAMS.
template_script['experiment2']['sampler'] = utils.CombinatorialLeaf(['repex', 'sams'])
template_script['experiments'] = ['experiment1', 'experiment2']
exp_builder = ExperimentBuilder(template_script)
experiments = list(exp_builder._expand_experiments())
# The default is auto.
assert exp_builder._options['processes_per_experiment'] == 'auto'
# When there is no MPI environment the calculation is serial.
assert exp_builder._get_experiment_mpi_group_size(experiments) is None
    # In an MPI environment, the MPI communicator is split according
    # to the number of experiments that still have to be completed. Each
    # test case is a tuple (experiments, MPICOMM size, expected return value).
test_cases = [
(experiments, 5, 1), # This contains a SAMS sampler so only 1 MPI process is used.
        (experiments[:-1], 4, [1, 1, 2]), # 3 repex samplers, but the last experiment has more intermediate states.
        (experiments[1:-1], 4, [2, 2]), # 2 repex samplers on 4 MPI processes.
        (experiments[1:-1], 6, [3, 3]), # 2 repex samplers on 6 MPI processes.
        (list(reversed(experiments[1:-1])), 3, [2, 1]), # 2 repex samplers on 3 MPI processes.
        (experiments[:-1], 2, 1), # Fewer MPI processes than experiments, split everything.
]
for i, (exp, mpicomm_size, expected_result) in enumerate(test_cases):
with mpiplus.mpiplus._simulated_mpi_environment(size=mpicomm_size):
result = exp_builder._get_experiment_mpi_group_size(exp)
err_msg = ('experiments: {}\nMPICOMM size: {}\nexpected result: {}'
'\nresult: {}').format(*test_cases[i], result)
assert result == expected_result, err_msg
    # Test manual setting of processes_per_experiment.
test_cases = [2, None]
for processes_per_experiment in test_cases:
exp_builder._options['processes_per_experiment'] = processes_per_experiment
# Serial execution is always None.
assert exp_builder._get_experiment_mpi_group_size(experiments) is None
with mpiplus.mpiplus._simulated_mpi_environment(size=5):
assert exp_builder._get_experiment_mpi_group_size(experiments[:-1]) == processes_per_experiment
            # When there is a SAMS sampler, it's always 1.
assert exp_builder._get_experiment_mpi_group_size(experiments) == 1
def test_validation_wrong_options():
"""YAML validation raises exception with wrong molecules."""
options = [
("found unknown parameter", {'unknown_options': 3}),
("parameter minimize=100 is incompatible with True", {'minimize': 100}),
("invalid literal for int", {'processes_per_experiment': 'incorrect_string'})
]
for regex, option in options:
yield assert_raises_regexp, YamlParseError, regex, ExperimentBuilder._validate_options, option, True
def test_validation_correct_molecules():
"""Correct molecules YAML validation."""
paths = examples_paths()
molecules = [
{'name': 'toluene', 'leap': {'parameters': 'leaprc.gaff'}},
{'name': 'toluene', 'leap': {'parameters': ['leaprc.gaff', 'toluene.frcmod']}},
{'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}},
{'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
'antechamber': {'charge_method': None}},
{'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'},
'epik': {'ph': 7.6, 'ph_tolerance': 0.7, 'tautomerize': False, 'select': 0}},
{'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
'antechamber': {'charge_method': None}, 'epik': {'select': 1}},
{'filepath': paths['abl']},
{'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff99SBildn'}},
{'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff99SBildn'}, 'select': 1},
{'filepath': paths['abl'], 'select': 'all'},
{'filepath': paths['abl'], 'select': 'all', 'strip_protons': True},
{'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {}},
{'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'add_missing_residues': True}},
{'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'add_missing_atoms': 'all', 'ph': '8.0'}},
{'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'remove_heterogens': 'all'}},
{'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'replace_nonstandard_residues': True}},
{'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'apply_mutations': {'chain_id': 'A', 'mutations': 'T85I'}}},
{'filepath': paths['abl'], 'select': 'all', 'modeller': {'apply_mutations': {'chain_id': 'A', 'mutations': 'T85I'}}},
{'filepath': paths['abl'], 'select': 'all', 'modeller': {'apply_mutations': {'chain_id': 'A', 'mutations': 'WT'}}},
{'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'apply_mutations': {'chain_id': 'A', 'mutations': 'I8A/T9A'}}},
{'filepath': paths['toluene'], 'leap': {'parameters': 'leaprc.gaff'}},
{'filepath': paths['benzene'], 'epik': {'select': 1, 'tautomerize': False}},
# Regions tests, make sure all other combos still work
{'name': 'toluene', 'regions': {'a_region': 4}},
{'name': 'toluene', 'regions': {'a_region': 'dsl string'}},
{'name': 'toluene', 'regions': {'a_region': [0, 2, 3]}},
{'name': 'toluene', 'regions': {'a_region': [0, 2, 3], 'another_region': [5, 4, 3]}},
{'smiles': 'Cc1ccccc1', 'regions': {'a_region': 4}},
{'smiles': 'Cc1ccccc1', 'regions': {'a_region': 'dsl string'}},
{'smiles': 'Cc1ccccc1', 'regions': {'a_region': [0, 2, 3]}},
{'smiles': 'Cc1ccccc1', 'regions': {'a_region': [0, 2, 3], 'another_region': [5, 4, 3]}},
{'filepath': paths['abl'], 'regions': {'a_region': 4}},
{'filepath': paths['abl'], 'regions': {'a_region': 'dsl string'}},
{'filepath': paths['abl'], 'regions': {'a_region': [0, 2, 3]}},
{'filepath': paths['abl'], 'regions': {'a_region': [0, 2, 3], 'another_region': [5, 4, 3]}},
{'filepath': paths['toluene'], 'regions': {'a_region': 4}},
{'filepath': paths['toluene'], 'regions': {'a_region': 'dsl string'}},
{'filepath': paths['toluene'], 'regions': {'a_region': [0, 2, 3]}},
{'filepath': paths['toluene'], 'regions': {'a_region': [0, 2, 3], 'another_region': [5, 4, 3]}}
]
for molecule in molecules:
yield ExperimentBuilder._validate_molecules, {'mol': molecule}
def test_validation_wrong_molecules():
"""YAML validation raises exception with wrong molecules."""
paths = examples_paths()
paths['wrongformat'] = utils.get_data_filename(os.path.join('tests', 'data', 'README.md'))
molecules = [
{'antechamber': {'charge_method': 'bcc'}},
{'filepath': paths['wrongformat']},
{'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}, 'unknown': 4},
{'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'}},
{'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'invalid'},
'antechamber': {'charge_method': None}},
{'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
'antechamber': {'charge_method': 'bcc'}},
{'filepath': 'nonexistentfile.pdb', 'leap': {'parameters': 'leaprc.ff14SB'}},
{'filepath': paths['toluene'], 'smiles': 'Cc1ccccc1'},
{'filepath': paths['toluene'], 'strip_protons': True},
{'filepath': paths['abl'], 'leap': {'parameters': 'oldff/leaprc.ff14SB'}, 'epik': {'select': 0}},
{'name': 'toluene', 'epik': 0},
{'name': 'toluene', 'epik': {'tautomerize': 6}},
{'name': 'toluene', 'epik': {'extract_range': 1}},
{'name': 'toluene', 'smiles': 'Cc1ccccc1'},
{'name': 3},
{'smiles': 'Cc1ccccc1', 'select': 1},
{'name': 'Cc1ccccc1', 'select': 1},
{'filepath': paths['abl'], 'leap': {'parameters': 'oldff/leaprc.ff14SB'}, 'select': 'notanoption'},
{'filepath': paths['abl'], 'regions': 5},
{'filepath': paths['abl'], 'regions': {'a_region': [-56, 5.23]}},
{'filepath': paths['toluene'], 'leap': {'parameters': 'leaprc.gaff'}, 'strip_protons': True},
]
for molecule in molecules:
yield assert_raises, YamlParseError, ExperimentBuilder._validate_molecules, {'mol': molecule}
def test_validation_correct_solvents():
"""Correct solvents YAML validation."""
solvents = [
{'nonbonded_method': 'Ewald', 'nonbonded_cutoff': '3*nanometers'},
{'nonbonded_method': 'PME', 'solvent_model': 'tip4pew'},
{'nonbonded_method': 'PME', 'solvent_model': 'tip3p', 'leap': {'parameters': 'leaprc.water.tip3p'}},
{'nonbonded_method': 'PME', 'clearance': '3*angstroms'},
{'nonbonded_method': 'PME'},
{'nonbonded_method': 'NoCutoff', 'implicit_solvent': 'OBC2'},
{'nonbonded_method': 'CutoffPeriodic', 'nonbonded_cutoff': '9*angstroms',
'clearance': '9*angstroms', 'positive_ion': 'Na+', 'negative_ion': 'Cl-',
'ionic_strength': '200*millimolar'},
{'implicit_solvent': 'OBC2', 'implicit_solvent_salt_conc': '1.0*nanomolar'},
{'nonbonded_method': 'PME', 'clearance': '3*angstroms', 'ewald_error_tolerance': 0.001},
]
for solvent in solvents:
yield ExperimentBuilder._validate_solvents, {'solv': solvent}
def test_validation_wrong_solvents():
"""YAML validation raises exception with wrong solvents."""
# Each test case is a pair (regexp_error, solvent_description).
solvents = [
("nonbonded_cutoff:\n- can be specified only with the following nonbonded methods \['CutoffPeriodic', 'CutoffNonPeriodic',\n 'Ewald', 'PME'\]",
{'nonbonded_cutoff': '3*nanometers'}),
("solvent_model:\n- unallowed value unknown_solvent_model",
{'nonbonded_method': 'PME', 'solvent_model': 'unknown_solvent_model'}),
("leap:\n- must be of dict type",
{'nonbonded_method': 'PME', 'solvent_model': 'tip3p', 'leap': 'leaprc.water.tip3p'}),
("implicit_solvent:\n- can be specified only if nonbonded method is NoCutoff",
{'nonbonded_method': 'PME', 'clearance': '3*angstroms', 'implicit_solvent': 'OBC2'}),
("blabla:\n- unknown field",
{'nonbonded_method': 'NoCutoff', 'blabla': '3*nanometers'}),
("''implicit_solvent'' cannot be coerced: module ''simtk.openmm.app'' has no\n attribute ''OBX2'''",
{'nonbonded_method': 'NoCutoff', 'implicit_solvent': 'OBX2'}),
("''implicit_solvent_salt_conc'' cannot be coerced: Units of 1.0\*angstrom",
{'implicit_solvent': 'OBC2', 'implicit_solvent_salt_conc': '1.0*angstrom'})
]
for regexp, solvent in solvents:
yield assert_raises_regexp, YamlParseError, regexp, ExperimentBuilder._validate_solvents, {'solv': solvent}
def test_validation_correct_systems():
"""Correct systems YAML validation."""
data_paths = examples_paths()
exp_builder = ExperimentBuilder()
basic_script = """
---
molecules:
rec: {{filepath: {0}, leap: {{parameters: leaprc.ff14SB}}}}
rec_reg: {{filepath: {0}, regions: {{receptregion: 'some dsl'}}, leap: {{parameters: leaprc.ff14SB}}}}
lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
lig_reg: {{name: lig, regions: {{ligregion: [143, 123]}}, leap: {{parameters: leaprc.gaff}}}}
solvents:
solv: {{nonbonded_method: NoCutoff}}
solv2: {{nonbonded_method: NoCutoff, implicit_solvent: OBC2}}
solv3: {{nonbonded_method: PME, clearance: 10*angstroms}}
solv4: {{nonbonded_method: PME}}
""".format(data_paths['lysozyme'])
basic_script = yaml.load(textwrap.dedent(basic_script), Loader=yaml.FullLoader)
systems = [
{'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv'},
{'receptor': 'rec_reg', 'ligand': 'lig_reg', 'solvent': 'solv'},
{'receptor': 'rec_reg', 'ligand': 'lig', 'solvent': 'solv'},
{'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv', 'pack': True},
{'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv3',
'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}},
{'phase1_path': data_paths['bentol-complex'],
'phase2_path': data_paths['bentol-solvent'],
'ligand_dsl': 'resname BEN', 'solvent': 'solv'},
{'phase1_path': data_paths['bentol-complex'],
'phase2_path': data_paths['bentol-solvent'],
'ligand_dsl': 'resname BEN', 'solvent': 'solv4'},
{'phase1_path': data_paths['bentol-complex'],
'phase2_path': data_paths['bentol-solvent'],
'ligand_dsl': 'resname BEN', 'solvent1': 'solv3',
'solvent2': 'solv2'},
{'phase1_path': data_paths['pxylene-complex'],
'phase2_path': data_paths['pxylene-solvent'],
'ligand_dsl': 'resname p-xylene', 'solvent': 'solv',
'gromacs_include_dir': data_paths['pxylene-gro-include']},
{'phase1_path': data_paths['pxylene-complex'],
'phase2_path': data_paths['pxylene-solvent'],
'ligand_dsl': 'resname p-xylene', 'solvent': 'solv'},
{'phase1_path': data_paths['toluene-solvent'],
'phase2_path': data_paths['toluene-vacuum'],
'ligand_dsl': 'resname TOL'},
{'phase1_path': data_paths['toluene-solvent'],
'phase2_path': data_paths['toluene-vacuum'],
'ligand_dsl': 'resname TOL', 'solvent_dsl': 'not resname TOL'},
{'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},
{'solute': 'lig_reg', 'solvent1': 'solv', 'solvent2': 'solv'},
{'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv',
'leap': {'parameters': 'leaprc.gaff'}}
]
for system in systems:
modified_script = basic_script.copy()
modified_script['systems'] = {'sys': system}
yield exp_builder.parse, modified_script
def test_validation_wrong_systems():
"""YAML validation raises exception with wrong systems specification."""
data_paths = examples_paths()
exp_builder = ExperimentBuilder()
basic_script = """
---
molecules:
rec: {{filepath: {0}, leap: {{parameters: oldff/leaprc.ff14SB}}}}
rec_region: {{filepath: {0}, regions: {{a_region: 'some string'}}, leap: {{parameters: oldff/leaprc.ff14SB}}}}
lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
lig_region: {{name: lig, regions: {{a_region: 'some string'}}, leap: {{parameters: leaprc.gaff}}}}
solvents:
solv: {{nonbonded_method: NoCutoff}}
solv2: {{nonbonded_method: NoCutoff, implicit_solvent: OBC2}}
solv3: {{nonbonded_method: PME, clearance: 10*angstroms}}
solv4: {{nonbonded_method: PME}}
""".format(data_paths['lysozyme'])
basic_script = yaml.load(textwrap.dedent(basic_script), Loader=yaml.FullLoader)
# Each test case is a pair (regexp_error, system_description).
systems = [
("'solvent' is required",
{'receptor': 'rec', 'ligand': 'lig'}),
("regions\(s\) clashing",
{'receptor': 'rec_region', 'ligand': 'lig_region', 'solvent': 'solv'}),
("ligand:\n- must be of string type",
{'receptor': 'rec', 'ligand': 1, 'solvent': 'solv'}),
("solvent:\n- must be of string type",
{'receptor': 'rec', 'ligand': 'lig', 'solvent': ['solv', 'solv']}),
("unallowed value unknown",
{'receptor': 'rec', 'ligand': 'lig', 'solvent': 'unknown'}),
("solv4 does not specify clearance",
{'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv4',
'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}}),
("parameters:\n- unknown field",
{'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv3',
'parameters': 'leaprc.ff14SB'}),
("phase1_path:\n- must be of list type",
{'phase1_path': data_paths['bentol-complex'][0],
'phase2_path': data_paths['bentol-solvent'],
'ligand_dsl': 'resname BEN', 'solvent': 'solv'}),
("File path nonexistingpath.prmtop does not exist.",
{'phase1_path': ['nonexistingpath.prmtop', 'nonexistingpath.inpcrd'],
'phase2_path': data_paths['bentol-solvent'],
'ligand_dsl': 'resname BEN', 'solvent': 'solv'}),
("ligand_dsl:\n- must be of string type",
{'phase1_path': data_paths['bentol-complex'],
'phase2_path': data_paths['bentol-solvent'],
'ligand_dsl': 3.4, 'solvent': 'solv'}),
("unallowed value unknown",
{'phase1_path': data_paths['bentol-complex'],
'phase2_path': data_paths['bentol-solvent'],
'ligand_dsl': 'resname BEN', 'solvent1': 'unknown',
'solvent2': 'solv2'}),
("unallowed value cantbespecified",
{'phase1_path': data_paths['toluene-solvent'],
'phase2_path': data_paths['toluene-vacuum'],
'ligand_dsl': 'resname TOL', 'solvent': 'cantbespecified'}),
("field 'ligand' is required",
{'receptor': 'rec', 'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'}),
("''ligand'' must not be present with ''solute''",
{'ligand': 'lig', 'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'}),
("leap:\n- must be of dict type",
{'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv', 'leap': 'leaprc.gaff'})
]
for regexp, system in systems:
modified_script = basic_script.copy()
modified_script['systems'] = {'sys': system}
yield assert_raises_regexp, YamlParseError, regexp, exp_builder.parse, modified_script
def test_validation_correct_mcmc_moves():
"""Correct samplers YAML validation."""
mcmc_moves = [
{'type': 'LangevinSplittingDynamicsMove', 'reassign_velocities': False,
'splitting': 'VRORV', 'n_steps': 10, 'timestep': '2.0*femtosecond'},
{'type': 'SequenceMove', 'move_list': [
{'type': 'MCDisplacementMove', 'displacement_sigma': '5.0*nanometers'},
{'type': 'LangevinSplittingDynamicsMove'}
]},
]
for mcmc_move in mcmc_moves:
yield ExperimentBuilder._validate_mcmc_moves, {'mcmc_moves': {'mcmcmove1': mcmc_move}}
def test_validation_wrong_mcmc_moves():
"""YAML validation raises exception with wrong mcmc move specification."""
# Each test case is a pair (regexp_error, mcmc_move_description).
mcmc_moves = [
("The expression 2.0 must be a string",
{'type': 'LangevinSplittingDynamicsMove', 'timestep': 2.0}),
("Could not find class UnknownMoveClass",
{'type': 'UnknownMoveClass'}),
("Could not find class NestedUnknownMoveClass",
{'type': 'SequenceMove', 'move_list': [
{'type': 'MCDisplacementMove'},
{'type': 'NestedUnknownMoveClass'}
]})
]
for regexp, mcmc_move in mcmc_moves:
script = {'mcmc_moves': {'mcmc_move1': mcmc_move}}
yield assert_raises_regexp, YamlParseError, regexp, ExperimentBuilder._validate_mcmc_moves, script
def test_validation_correct_samplers():
"""Correct samplers YAML validation."""
samplers = [
{'type': 'MultiStateSampler', 'locality': 3},
{'type': 'ReplicaExchangeSampler'},
# MCMCMove 'single' is defined in get_template_script().
{'type': 'SAMSSampler', 'mcmc_moves': 'single'},
{'type': 'ReplicaExchangeSampler', 'number_of_iterations': 5, 'replica_mixing_scheme': 'swap-neighbors'},
{'type': 'ReplicaExchangeSampler', 'number_of_iterations': 5, 'replica_mixing_scheme': None}
]
exp_builder = ExperimentBuilder(get_template_script())
for sampler in samplers:
script = {'samplers': {'sampler1': sampler}}
yield exp_builder._validate_samplers, script
def test_validation_wrong_samplers():
"""YAML validation raises exception with wrong experiments specification."""
# Each test case is a pair (regexp_error, sampler_description).
samplers = [
("locality must be an int",
{'type': 'MultiStateSampler', 'locality': 3.0}),
("unallowed value unknown",
{'type': 'ReplicaExchangeSampler', 'mcmc_moves': 'unknown'}),
("Could not find class NonExistentSampler",
{'type': 'NonExistentSampler'}),
("found unknown parameter",
{'type': 'ReplicaExchangeSampler', 'unknown_kwarg': 5}),
]
exp_builder = ExperimentBuilder(get_template_script())
for regexp, sampler in samplers:
script = {'samplers': {'sampler1': sampler}}
yield assert_raises_regexp, YamlParseError, regexp, exp_builder._validate_samplers, script
def test_order_phases():
"""YankLoader preserves protocol phase order."""
yaml_content_template = """
---
absolute-binding:
{}:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]
{}:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]
{}:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]"""
    # Find an order of phases for which normal parsing is not already ordered, otherwise the test is useless
for ordered_phases in itertools.permutations(['athirdphase', 'complex', 'solvent']):
yaml_content = yaml_content_template.format(*ordered_phases)
parsed = yaml.load(textwrap.dedent(yaml_content), Loader=yaml.FullLoader)
if tuple(parsed['absolute-binding'].keys()) != ordered_phases:
break
# Insert !Ordered tag
yaml_content = yaml_content.replace('binding:', 'binding: !Ordered')
parsed = yank_load(yaml_content)
assert tuple(parsed['absolute-binding'].keys()) == ordered_phases
def test_validation_correct_protocols():
"""Correct protocols YAML validation."""
basic_protocol = yank_load(standard_protocol)
# Alchemical paths
protocols = [
{'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0]},
{'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0],
'lambda_torsions': [1.0, 0.5, 0.0], 'lambda_angles': [1.0, 0.5, 0.0]},
{'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0],
'temperature': ['300*kelvin', '340*kelvin', '300*kelvin']},
'auto',
]
for protocol in protocols:
modified_protocol = copy.deepcopy(basic_protocol)
modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol
yield ExperimentBuilder._validate_protocols, modified_protocol
# Try different options both with 'auto' and a path with alchemical functions.
function_protocol = copy.deepcopy(basic_protocol)
function_protocol['absolute-binding']['complex']['alchemical_path'] = {
'lambda_electrostatics': 'lambda**2',
'lambda_sterics': 'sqrt(lambda)',
'lambda': [1.0, 0.0]
}
auto_protocol = copy.deepcopy(basic_protocol)
auto_protocol['absolute-binding']['complex']['alchemical_path'] = 'auto'
trailblazer_options = [
{'n_equilibration_iterations': 1000, 'n_samples_per_state': 100,
'thermodynamic_distance': 0.5, 'distance_tolerance': 0.05},
{'n_equilibration_iterations': 100, 'n_samples_per_state': 10},
{'thermodynamic_distance': 1.0, 'distance_tolerance': 0.5},
{'function_variable_name': 'lambda'},
{'function_variable_name': 'lambda', 'reversed_direction': False}
]
for opts in trailblazer_options:
# Use the function protocol if the function variable is specified.
if 'function_variable_name' in opts:
modified_protocol = copy.deepcopy(function_protocol)
else:
modified_protocol = copy.deepcopy(auto_protocol)
modified_protocol['absolute-binding']['complex']['trailblazer_options'] = opts
yield ExperimentBuilder._validate_protocols, modified_protocol
# Multiple phases.
alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])
protocols = [
{'complex': alchemical_path, 'solvent': alchemical_path},
{'complex': alchemical_path, 'solvent': {'alchemical_path': 'auto'}},
{'my-complex': alchemical_path, 'my-solvent': alchemical_path},
{'solvent1': alchemical_path, 'solvent2': alchemical_path},
{'solvent1variant': alchemical_path, 'solvent2variant': alchemical_path},
collections.OrderedDict([('a', alchemical_path), ('z', alchemical_path)]),
collections.OrderedDict([('z', alchemical_path), ('a', alchemical_path)])
]
for protocol in protocols:
modified_protocol = copy.deepcopy(basic_protocol)
modified_protocol['absolute-binding'] = protocol
yield ExperimentBuilder._validate_protocols, modified_protocol
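        # The validated protocol should come back as an OrderedDict; when the input was not
        # explicitly ordered, the complex/solvent1 phase is expected to come first.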
sorted_protocol = ExperimentBuilder._validate_protocols(modified_protocol)['absolute-binding']
if isinstance(protocol, collections.OrderedDict):
            assert list(sorted_protocol.keys()) == list(protocol.keys())
else:
assert isinstance(sorted_protocol, collections.OrderedDict)
first_phase = next(iter(sorted_protocol.keys())) # py2/3 compatible
assert 'complex' in first_phase or 'solvent1' in first_phase
def test_validation_wrong_protocols():
"""YAML validation raises exception with wrong alchemical protocols."""
basic_protocol = yank_load(standard_protocol)
# Alchemical paths
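    # Each entry below should fail validation: missing lambda_sterics, a non-numeric value,
    # values outside [0, 1], a scalar instead of a list, and a non-string key.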
protocols = [
{'lambda_electrostatics': [1.0, 0.5, 0.0]},
{'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 'wrong!']},
{'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 11000.0]},
{'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, -0.5]},
{'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': 0.0},
{'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0], 3: 2}
]
for protocol in protocols:
modified_protocol = copy.deepcopy(basic_protocol)
modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol
yield assert_raises, YamlParseError, ExperimentBuilder._validate_protocols, modified_protocol
# Try different options both with 'auto' and a path with alchemical functions.
auto_path = 'auto'
no_lambda_path = {'lambda_electrostatics': 'lambda**2', 'lambda_sterics': 'sqrt(lambda)'}
hardcoded_path = {'lambda_electrostatics': [1.0, 0.0], 'lambda_sterics': [1.0, 0.0]}
correct_lambda_path = {'lambda': [1.0, 0.0], **no_lambda_path}
str_lambda_path = {'lambda': 'string', **no_lambda_path}
three_lambda_path = {'lambda': [1.0, 0.5, 0.0], **no_lambda_path}
# Each test case is (error_regex, options, alchemical_path)
trailblazer_options = [
("n_equilibration_iterations:\n - must be of integer type",
{'n_equilibration_iterations': 'bla'}, auto_path),
("Only mathematical expressions have been given with no values for their variables",
{}, no_lambda_path),
("Mathematical expressions were detected but no function variable name was given",
{}, correct_lambda_path),
("Function variable name 'lambda' is not defined in 'alchemical_path'",
{'function_variable_name': 'lambda'}, hardcoded_path),
("Only mathematical expressions have been given with no values for their variables",
{'function_variable_name': 'lambda'}, str_lambda_path),
("Only the two end-point values of function variable 'lambda' should be given.",
{'function_variable_name': 'lambda'}, three_lambda_path),
]
for regex, opts, alchemical_path in trailblazer_options:
modified_protocol = copy.deepcopy(basic_protocol)
modified_protocol['absolute-binding']['complex']['alchemical_path'] = alchemical_path
modified_protocol['absolute-binding']['complex']['trailblazer_options'] = opts
yield assert_raises_regexp, YamlParseError, regex, ExperimentBuilder._validate_protocols, modified_protocol
# Phases
alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])
protocols = [
{'complex': alchemical_path},
{2: alchemical_path, 'solvent': alchemical_path},
{'complex': alchemical_path, 'solvent': alchemical_path, 'thirdphase': alchemical_path},
{'my-complex-solvent': alchemical_path, 'my-solvent': alchemical_path},
{'my-complex': alchemical_path, 'my-complex-solvent': alchemical_path},
{'my-complex': alchemical_path, 'my-complex': alchemical_path},
{'complex': alchemical_path, 'solvent1': alchemical_path, 'solvent2': alchemical_path},
{'my-phase1': alchemical_path, 'my-phase2': alchemical_path},
collections.OrderedDict([('my-phase1', alchemical_path), ('my-phase2', alchemical_path),
('my-phase3', alchemical_path)])
]
for protocol in protocols:
modified_protocol = copy.deepcopy(basic_protocol)
modified_protocol['absolute-binding'] = protocol
yield assert_raises, YamlParseError, ExperimentBuilder._validate_protocols, modified_protocol
def test_validation_correct_experiments():
"""Correct experimentYAML validation."""
exp_builder = ExperimentBuilder()
basic_script = """
---
molecules:
rec: {{filepath: {}, leap: {{parameters: oldff/leaprc.ff14SB}}}}
lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
solvents:
solv: {{nonbonded_method: NoCutoff}}
systems:
sys: {{receptor: rec, ligand: lig, solvent: solv}}
protocols:{}
""".format(examples_paths()['lysozyme'], standard_protocol)
basic_script = yank_load(basic_script)
bor = {'system': 'sys', 'protocol': 'absolute-binding', 'restraint': {
'type': 'Boresch', 'restrained_receptor_atoms': [1335, 1339, 1397],
'restrained_ligand_atoms': [2609, 2607, 2606], 'r_aA0': '0.35*nanometer',
'K_r': '20.0*kilocalories_per_mole/angstrom**2'}}
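    # A PeriodicTorsionBoresch restraint takes the same constructor parameters as Boresch,
    # so the same description is reused below with only the restraint type changed.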
    period_tor_bor = copy.deepcopy(bor)  # deep copy: avoid mutating bor's nested restraint dict
    period_tor_bor['restraint']['type'] = 'PeriodicTorsionBoresch'
experiments = [
{'system': 'sys', 'protocol': 'absolute-binding'},
{'system': 'sys', 'protocol': 'absolute-binding', 'restraint': {'type': 'Harmonic'}},
{'system': 'sys', 'protocol': 'absolute-binding', 'restraint': {
'type': 'Harmonic', 'spring_constant': '8*kilojoule_per_mole/nanometers**2'}},
{'system': 'sys', 'protocol': 'absolute-binding', 'restraint': {
'type': 'FlatBottom', 'well_radius': '5.2*nanometers', 'restrained_receptor_atoms': 1644}},
bor,
period_tor_bor
]
for experiment in experiments:
modified_script = basic_script.copy()
modified_script['experiments'] = experiment
yield exp_builder.parse, modified_script
def test_validation_wrong_experiments():
"""YAML validation raises exception with wrong experiments specification."""
exp_builder = ExperimentBuilder()
basic_script = """
---
molecules:
rec: {{filepath: {}, leap: {{parameters: oldff/leaprc.ff14SB}}}}
lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
solvents:
solv: {{nonbonded_method: NoCutoff}}
systems:
sys: {{receptor: rec, ligand: lig, solvent: solv}}
protocols:{}
""".format(examples_paths()['lysozyme'], standard_protocol)
basic_script = yank_load(basic_script)
experiments = [
{'system': 'unknownsys', 'protocol': 'absolute-binding'},
{'system': 'sys', 'protocol': 'unknownprotocol'},
{'system': 'sys'},
{'protocol': 'absolute-binding'},
# Restraint does not specify "type".
{'system': 'sys', 'protocol': 'absolute-binding', 'restraint': {
'spring_constant': '8*kilojoule_per_mole/nanometers**2'}},
# Restraint has unknown constructor parameter.
{'system': 'sys', 'protocol': 'absolute-binding', 'restraint': {
'type': 'Harmonic', 'unknown': '3*meters'}},
]
for experiment in experiments:
modified_script = basic_script.copy()
modified_script['experiments'] = experiment
yield assert_raises, YamlParseError, exp_builder.parse, modified_script
# ==============================================================================
# Molecules pipeline
# ==============================================================================
def test_yaml_mol2_antechamber():
"""Test antechamber setup of molecule files."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir)
exp_builder = ExperimentBuilder(yaml_content)
exp_builder._db._setup_molecules('benzene')
output_dir = exp_builder._db.get_molecule_dir('benzene')
gaff_path = os.path.join(output_dir, 'benzene.gaff.mol2')
frcmod_path = os.path.join(output_dir, 'benzene.frcmod')
# Get last modified time
last_touched_gaff = os.stat(gaff_path).st_mtime
last_touched_frcmod = os.stat(frcmod_path).st_mtime
# Check that output files have been created
assert os.path.exists(gaff_path)
assert os.path.exists(frcmod_path)
assert os.path.getsize(gaff_path) > 0
assert os.path.getsize(frcmod_path) > 0
        # Check that a second call to setup_molecules does not recreate the molecule files
time.sleep(0.5) # st_mtime doesn't have much precision
exp_builder._db._setup_molecules('benzene')
assert last_touched_gaff == os.stat(gaff_path).st_mtime
assert last_touched_frcmod == os.stat(frcmod_path).st_mtime
@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_setup_name_smiles_openeye_charges():
"""Setup molecule from name and SMILES with openeye charges and gaff."""
with mmtools.utils.temporary_directory() as tmp_dir:
molecules_ids = ['toluene-smiles', 'p-xylene-name']
yaml_content = get_template_script(tmp_dir, keep_openeye=True)
exp_builder = ExperimentBuilder(yaml_content)
exp_builder._db._setup_molecules(*molecules_ids)
for mol in molecules_ids:
output_dir = exp_builder._db.get_molecule_dir(mol)
output_basepath = os.path.join(output_dir, mol)
# Check that all the files have been created
assert os.path.exists(output_basepath + '.mol2')
assert os.path.exists(output_basepath + '.gaff.mol2')
assert os.path.exists(output_basepath + '.frcmod')
assert os.path.getsize(output_basepath + '.mol2') > 0
assert os.path.getsize(output_basepath + '.gaff.mol2') > 0
assert os.path.getsize(output_basepath + '.frcmod') > 0
atoms_frame, _ = mdtraj.formats.mol2.mol2_to_dataframes(output_basepath + '.mol2')
input_charges = atoms_frame['charge']
atoms_frame, _ = mdtraj.formats.mol2.mol2_to_dataframes(output_basepath + '.gaff.mol2')
output_charges = atoms_frame['charge']
# With openeye:am1bcc charges, the final charges should be unaltered
if mol == 'p-xylene-name':
assert input_charges.equals(output_charges)
else: # With antechamber, sqm should alter the charges a little
assert not input_charges.equals(output_charges)
# Check that molecules are resumed correctly
exp_builder = ExperimentBuilder(yaml_content)
exp_builder._db._setup_molecules(*molecules_ids)
@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_clashing_atoms():
"""Check that clashing atoms are resolved."""
benzene_path = examples_paths()['benzene']
toluene_path = examples_paths()['toluene']
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir, keep_openeye=True)
system_id = 'explicit-system'
system_description = yaml_content['systems'][system_id]
system_description['pack'] = True
system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])
# Sanity check: at the beginning molecules clash
toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))
benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))
assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD
exp_builder = ExperimentBuilder(yaml_content)
for sys_id in [system_id + '_vacuum', system_id + '_PME']:
system_dir = os.path.dirname(
exp_builder._db.get_system(sys_id)[0].position_path)
# Get positions of molecules in the final system
prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))
inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))
positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
topography = Topography(prmtop.topology, ligand_atoms='resname TOL')
benzene_pos2 = positions.take(topography.receptor_atoms, axis=0)
toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)
# atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')
# benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)
# toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)
# Test that clashes are resolved in the system
min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)
assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD
# For solvent we check that molecule is within the box
if sys_id == system_id + '_PME':
assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)
@unittest.skipIf(not moltools.schrodinger.is_schrodinger_suite_installed(),
"This test requires Schrodinger's suite")
def test_epik_enumeration():
"""Test epik protonation state enumeration."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir, keep_schrodinger=True)
exp_builder = ExperimentBuilder(yaml_content)
mol_ids = ['benzene-epik0', 'benzene-epikcustom']
exp_builder._db._setup_molecules(*mol_ids)
for mol_id in mol_ids:
output_dir = exp_builder._db.get_molecule_dir(mol_id)
output_basename = os.path.join(output_dir, mol_id + '-epik.')
assert os.path.exists(output_basename + 'mol2')
assert os.path.getsize(output_basename + 'mol2') > 0
assert os.path.exists(output_basename + 'sdf')
assert os.path.getsize(output_basename + 'sdf') > 0
def setup_molecule_output_check(exp_builder_db, mol_id, output_path):
"""
Helper function to check molecules which have to go through the setup pipeline
Accepts the experiment builder database, the mol_id, and the output_path
Tries to setup the given mol_id and makes sure the output exists and is non-zero
"""
exp_builder_db._setup_molecules(mol_id)
assert os.path.exists(output_path)
assert os.path.getsize(output_path) > 0
def test_strip_protons():
"""Test that protons are stripped correctly for tleap."""
mol_id = 'Abl'
abl_path = examples_paths()['abl']
with mmtools.utils.temporary_directory() as tmp_dir:
# Safety check: protein must have protons
has_hydrogen = False
with open(abl_path, 'r') as f:
for line in f:
if line[:6] == 'ATOM ' and (line[12] == 'H' or line[13] == 'H'):
has_hydrogen = True
break
assert has_hydrogen
yaml_content = get_template_script(tmp_dir)
exp_builder = ExperimentBuilder(yaml_content)
output_dir = exp_builder._db.get_molecule_dir(mol_id)
output_path = os.path.join(output_dir, 'Abl.pdb')
        # We haven't set the strip_protons option, so this shouldn't do anything
exp_builder._db._setup_molecules(mol_id)
assert not os.path.exists(output_path)
# Now we set the strip_protons options and repeat
exp_builder._db.molecules[mol_id]['strip_protons'] = True
setup_molecule_output_check(exp_builder._db, mol_id, output_path)
# The new pdb does not have hydrogen atoms
has_hydrogen = False
with open(output_path, 'r') as f:
for line in f:
if line[:6] == 'ATOM ' and (line[12] == 'H' or line[13] == 'H'):
has_hydrogen = True
break
assert not has_hydrogen
def test_pdbfixer_mutations():
"""Test that pdbfixer can apply mutations correctly."""
mol_id = 'Abl'
abl_path = examples_paths()['abl']
with mmtools.utils.temporary_directory() as tmp_dir:
# Safety check: protein must have WT residue: THR at residue 85 in chain A
has_wt_residue = False
with open(abl_path, 'r') as f:
for line in f:
if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='THR'):
has_wt_residue = True
break
assert has_wt_residue
yaml_content = get_template_script(tmp_dir)
exp_builder = ExperimentBuilder(yaml_content)
output_dir = exp_builder._db.get_molecule_dir(mol_id)
output_path = os.path.join(output_dir, 'Abl.pdb')
        # We haven't set any pdbfixer options yet, so this shouldn't do anything
exp_builder._db._setup_molecules(mol_id)
assert not os.path.exists(output_path)
        # Now we set the pdbfixer apply_mutations option and repeat
exp_builder._db.molecules[mol_id]['pdbfixer'] = {
'apply_mutations' : {
'chain_id' : 'A',
'mutations': 'T85I',
}
}
setup_molecule_output_check(exp_builder._db, mol_id, output_path)
# Safety check: protein must have mutated residue: ILE at residue 85 in chain A
has_mut_residue = False
with open(output_path, 'r') as f:
for line in f:
if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='ILE'):
has_mut_residue = True
break
assert has_mut_residue
@unittest.skipIf(not utils.is_modeller_installed(), "This test requires Salilab Modeller")
def test_modeller_mutations():
"""Test that modeller can apply mutations correctly."""
mol_id = 'Abl'
abl_path = examples_paths()['abl']
with mmtools.utils.temporary_directory() as tmp_dir:
# Safety check: protein must have WT residue: THR at residue 85 in chain A
has_wt_residue = False
with open(abl_path, 'r') as f:
for line in f:
if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='THR'):
has_wt_residue = True
break
assert has_wt_residue
yaml_content = get_template_script(tmp_dir)
exp_builder = ExperimentBuilder(yaml_content)
output_dir = exp_builder._db.get_molecule_dir(mol_id)
output_path = os.path.join(output_dir, 'Abl.pdb')
        # We haven't set any modeller options yet, so this shouldn't do anything
exp_builder._db._setup_molecules(mol_id)
assert not os.path.exists(output_path)
# Calling modeller with WT creates a file (although the protein is not mutated).
exp_builder._db.molecules[mol_id]['modeller'] = {
'apply_mutations': {
'chain_id': 'A',
'mutations': 'WT',
}
}
setup_molecule_output_check(exp_builder._db, mol_id, output_path)
os.remove(output_path) # Remove file for next check.
# Reinitialize exp_builder
exp_builder = ExperimentBuilder(yaml_content)
        # Now we set the modeller apply_mutations option and repeat for the mutant case
exp_builder._db.molecules[mol_id]['modeller'] = {
'apply_mutations': {
'chain_id': 'A',
'mutations': 'T85I',
}
}
setup_molecule_output_check(exp_builder._db, mol_id, output_path)
# Safety check: protein must have mutated residue: ILE at residue 85 in chain A
has_mut_residue = False
with open(output_path, 'r') as f:
for line in f:
if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='ILE'):
has_mut_residue = True
break
assert has_mut_residue
def test_pdbfixer_processing():
"""Test that PDB fixer correctly parses and sets up the molecules"""
mol_id = 'Abl'
pdb_fixer_modifications = [
{'pdbfixer': {}},
{'pdbfixer': {'add_missing_residues': True}},
{'pdbfixer': {'add_missing_atoms': 'all', 'ph': '8.0'}},
{'pdbfixer': {'remove_heterogens': 'all'}},
{'pdbfixer': {'replace_nonstandard_residues': True}},
{'pdbfixer': {'apply_mutations': {'chain_id': 'A', 'mutations': 'T85I'}}},
{'pdbfixer': {'apply_mutations': {'chain_id': 'A', 'mutations': 'I8A/T9A'}}},
]
for mod in pdb_fixer_modifications:
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir)
exp_builder = ExperimentBuilder(yaml_content)
output_dir = exp_builder._db.get_molecule_dir(mol_id)
output_path = os.path.join(output_dir, 'Abl.pdb')
exp_builder._db.molecules[mol_id].update(mod)
yield setup_molecule_output_check, exp_builder._db, mol_id, output_path
# ==============================================================================
# Combinatorial expansion
# ==============================================================================
class TestMultiMoleculeFiles(object):
@classmethod
def setup_class(cls):
"""Create a 2-frame PDB file in pdb_path. The second frame has same positions
of the first one but with inversed z-coordinate."""
# Creating a temporary directory and generating paths for output files
cls.tmp_dir = tempfile.mkdtemp()
cls.pdb_path = os.path.join(cls.tmp_dir, 'multipdb.pdb')
cls.smiles_path = os.path.join(cls.tmp_dir, 'multismiles.smiles')
cls.sdf_path = os.path.join(cls.tmp_dir, 'multisdf.sdf')
cls.mol2_path = os.path.join(cls.tmp_dir, 'multimol2.mol2')
# Rotation matrix to invert z-coordinate, i.e. flip molecule w.r.t. x-y plane
rot = np.array([[1, 0, 0], [0, 1, 0], [0, 0, -1]])
# Create 2-frame PDB file. First frame is lysozyme, second is lysozyme with inverted z
lysozyme_path = examples_paths()['lysozyme']
lysozyme = PDBFile(lysozyme_path)
# Rotate positions to invert z for the second frame
symmetric_pos = lysozyme.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
symmetric_pos = symmetric_pos.dot(rot) * unit.angstrom
with open(cls.pdb_path, 'w') as f:
PDBFile.writeHeader(lysozyme.topology, file=f)
PDBFile.writeModel(lysozyme.topology, lysozyme.positions, file=f, modelIndex=0)
PDBFile.writeModel(lysozyme.topology, symmetric_pos, file=f, modelIndex=1)
# Create 2-molecule SMILES file
with open(cls.smiles_path, 'w') as f:
f.write('# comment\n')
f.write('benzene,c1ccccc1\n')
f.write('toluene,Cc1ccccc1\n')
# Create 2-molecule sdf and mol2 with OpenEye
if utils.is_openeye_installed():
from openeye import oechem
oe_benzene = utils.load_oe_molecules(examples_paths()['benzene'], molecule_idx=0)
oe_benzene_pos = utils.get_oe_mol_positions(oe_benzene).dot(rot)
oe_benzene.NewConf(oechem.OEFloatArray(oe_benzene_pos.flatten()))
# Save 2-conformer benzene in sdf and mol2 format
utils.write_oe_molecule(oe_benzene, cls.sdf_path)
utils.write_oe_molecule(oe_benzene, cls.mol2_path, mol2_resname='MOL')
@classmethod
def teardown_class(cls):
shutil.rmtree(cls.tmp_dir)
@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_expand_molecules(self):
"""Check that combinatorial molecules are handled correctly."""
yaml_content = """
---
molecules:
rec:
filepath: !Combinatorial [{}, {}]
leap: {{parameters: oldff/leaprc.ff14SB}}
lig:
name: !Combinatorial [iupac1, iupac2]
leap: {{parameters: leaprc.gaff}}
epik:
select: !Combinatorial [0, 2]
multi:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
select: all
smiles:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: all
sdf:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: all
mol2:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: all
solvents:
solv1:
nonbonded_method: NoCutoff
solv2:
nonbonded_method: PME
nonbonded_cutoff: 1*nanometer
clearance: 10*angstroms
protocols:{}
systems:
sys:
receptor: !Combinatorial [rec, multi]
ligand: lig
solvent: !Combinatorial [solv1, solv2]
experiments:
system: sys
protocol: absolute-binding
""".format(self.sdf_path, self.mol2_path, self.pdb_path,
self.smiles_path, self.sdf_path, self.mol2_path,
indent(indent(standard_protocol)))
yaml_content = textwrap.dedent(yaml_content)
expected_content = """
---
molecules:
rec_multisdf:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
rec_multimol2:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
lig_0_iupac1:
name: iupac1
leap: {{parameters: leaprc.gaff}}
epik: {{select: 0}}
lig_2_iupac1:
name: iupac1
leap: {{parameters: leaprc.gaff}}
epik: {{select: 2}}
lig_0_iupac2:
name: iupac2
leap: {{parameters: leaprc.gaff}}
epik: {{select: 0}}
lig_2_iupac2:
name: iupac2
leap: {{parameters: leaprc.gaff}}
epik: {{select: 2}}
multi_0:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
select: 0
multi_1:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
select: 1
smiles_0:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: 0
smiles_1:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: 1
sdf_0:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: 0
sdf_1:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: 1
mol2_0:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: 0
mol2_1:
filepath: {}
leap: {{parameters: leaprc.gaff}}
select: 1
solvents:
solv1:
nonbonded_method: NoCutoff
solv2:
nonbonded_method: PME
nonbonded_cutoff: 1*nanometer
clearance: 10*angstroms
protocols:{}
systems:
sys:
receptor: !Combinatorial [rec_multimol2, rec_multisdf, multi_0, multi_1]
ligand: !Combinatorial [lig_0_iupac1, lig_0_iupac2, lig_2_iupac1, lig_2_iupac2]
solvent: !Combinatorial [solv1, solv2]
experiments:
system: sys
protocol: absolute-binding
""".format(self.sdf_path, self.mol2_path, self.pdb_path, self.pdb_path,
self.smiles_path, self.smiles_path, self.sdf_path, self.sdf_path,
self.mol2_path, self.mol2_path, indent(standard_protocol))
expected_content = textwrap.dedent(expected_content)
raw = yank_load(yaml_content)
expanded = ExperimentBuilder(yaml_content)._expand_molecules(raw)
expected = yank_load(expected_content)
assert expanded == expected, 'Expected:\n{}\n\nExpanded:\n{}'.format(
expected['systems'], expanded['systems'])
def test_select_pdb_conformation(self):
"""Check that frame selection in multi-model PDB files works."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = """
---
options:
output_dir: {}
setup_dir: .
molecules:
selected:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
select: 1
""".format(tmp_dir, self.pdb_path)
yaml_content = textwrap.dedent(yaml_content)
exp_builder = ExperimentBuilder(yaml_content)
            # At this point the molecule is neither set up nor processed
is_setup, is_processed = exp_builder._db.is_molecule_setup('selected')
assert is_setup is False
assert is_processed is False
# The setup of the molecule must isolate the frame in a single-frame PDB
exp_builder._db._setup_molecules('selected')
selected_pdb_path = os.path.join(tmp_dir, pipeline.SetupDatabase.MOLECULES_DIR,
'selected', 'selected.pdb')
assert os.path.exists(os.path.join(selected_pdb_path))
assert os.path.getsize(os.path.join(selected_pdb_path)) > 0
# The positions must be the ones of the second frame
selected_pdb = PDBFile(selected_pdb_path)
selected_pos = selected_pdb.getPositions(asNumpy=True)
second_pos = PDBFile(self.pdb_path).getPositions(asNumpy=True, frame=1)
assert selected_pdb.getNumFrames() == 1
assert (selected_pos == second_pos).all()
# The description of the molecule is now updated
assert os.path.normpath(exp_builder._db.molecules['selected']['filepath']) == selected_pdb_path
            # The molecule is now both set up and processed
is_setup, is_processed = exp_builder._db.is_molecule_setup('selected')
assert is_setup is True
assert is_processed is True
# A new instance of ExperimentBuilder is able to resume with correct molecule
exp_builder = ExperimentBuilder(yaml_content)
is_setup, is_processed = exp_builder._db.is_molecule_setup('selected')
assert is_setup is True
assert is_processed is True
@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_setup_smiles(self):
"""Check that setup molecule from SMILES files works."""
from openeye.oechem import OEMolToSmiles
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = """
---
options:
output_dir: {}
setup_dir: .
molecules:
take-first:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
select-second:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
select: 1
""".format(tmp_dir, self.smiles_path, self.smiles_path)
yaml_content = textwrap.dedent(yaml_content)
exp_builder = ExperimentBuilder(yaml_content)
for i, mol_id in enumerate(['take-first', 'select-second']):
                # At this point the molecule is neither set up nor processed
is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)
assert is_setup is False
assert is_processed is False
                # The single SMILES string has been converted to a mol2 file
exp_builder._db._setup_molecules(mol_id)
mol2_path = os.path.join(tmp_dir, pipeline.SetupDatabase.MOLECULES_DIR, mol_id, mol_id + '.mol2')
assert os.path.exists(os.path.join(mol2_path))
assert os.path.getsize(os.path.join(mol2_path)) > 0
# The mol2 represents the right molecule
csv_smiles_str = pipeline.read_csv_lines(self.smiles_path, lines=i).strip().split(',')[1]
mol2_smiles_str = OEMolToSmiles(utils.load_oe_molecules(mol2_path, molecule_idx=0))
assert mol2_smiles_str == csv_smiles_str
                # The molecule is now both set up and processed
is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)
assert is_setup is True
assert is_processed is True
# A new instance of ExperimentBuilder is able to resume with correct molecule
exp_builder = ExperimentBuilder(yaml_content)
is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)
assert is_setup is True
assert is_processed is True
@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_select_sdf_mol2(self):
"""Check that selection in sdf and mol2 files works."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = """
---
options:
output_dir: {}
setup_dir: .
molecules:
sdf_0:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
select: 0
sdf_1:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
select: 1
mol2_0:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
select: 0
mol2_1:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
select: 1
""".format(tmp_dir, self.sdf_path, self.sdf_path, self.mol2_path, self.mol2_path)
yaml_content = textwrap.dedent(yaml_content)
exp_builder = ExperimentBuilder(yaml_content)
for extension in ['sdf', 'mol2']:
multi_path = getattr(self, extension + '_path')
for model_idx in [0, 1]:
mol_id = extension + '_' + str(model_idx)
                    # At this point the molecule is neither set up nor processed
is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)
assert is_setup is False
assert is_processed is False
exp_builder._db._setup_molecules(mol_id)
                    # The setup of the molecule must isolate the selected structure in a single-molecule file
single_mol_path = os.path.join(tmp_dir, pipeline.SetupDatabase.MOLECULES_DIR,
mol_id, mol_id + '.' + extension)
assert os.path.exists(os.path.join(single_mol_path))
assert os.path.getsize(os.path.join(single_mol_path)) > 0
if extension == 'mol2':
                        # OpenEye normally loses the resname when writing a mol2 file ('<0>'); check that it was restored
mol2_file = utils.Mol2File(single_mol_path)
assert len(list(mol2_file.resnames)) == 1
assert mol2_file.resname != '<0>'
# sdf files must be converted to mol2 to be fed to antechamber
if extension == 'sdf':
single_mol_path = os.path.join(tmp_dir, pipeline.SetupDatabase.MOLECULES_DIR,
mol_id, mol_id + '.mol2')
assert os.path.exists(os.path.join(single_mol_path))
assert os.path.getsize(os.path.join(single_mol_path)) > 0
# Check antechamber parametrization
single_mol_path = os.path.join(tmp_dir, pipeline.SetupDatabase.MOLECULES_DIR,
mol_id, mol_id + '.gaff.mol2')
assert os.path.exists(os.path.join(single_mol_path))
assert os.path.getsize(os.path.join(single_mol_path)) > 0
                    # The positions must be approximately correct (antechamber moves the molecule)
selected_oe_mol = utils.load_oe_molecules(single_mol_path, molecule_idx=0)
selected_pos = utils.get_oe_mol_positions(selected_oe_mol)
second_oe_mol = utils.load_oe_molecules(multi_path, molecule_idx=model_idx)
second_pos = utils.get_oe_mol_positions(second_oe_mol)
assert selected_oe_mol.NumConfs() == 1
assert np.allclose(selected_pos, second_pos, atol=1e-1)
                    # The molecule is now both set up and processed
is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)
assert is_setup is True
assert is_processed is True
# A new instance of ExperimentBuilder is able to resume with correct molecule
exp_builder = ExperimentBuilder(yaml_content)
is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)
assert is_setup is True
assert is_processed is True
def test_system_expansion():
"""Combinatorial systems are correctly expanded."""
# We need 2 combinatorial systems
template_script = get_template_script()
template_system = template_script['systems']['implicit-system']
del template_system['leap']
template_script['systems'] = {'system1': template_system.copy(),
'system2': template_system.copy()}
template_script['systems']['system1']['receptor'] = utils.CombinatorialLeaf(['Abl', 'T4Lysozyme'])
template_script['systems']['system2']['ligand'] = utils.CombinatorialLeaf(['p-xylene', 'toluene'])
template_script['experiments']['system'] = utils.CombinatorialLeaf(['system1', 'system2'])
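    # Each CombinatorialLeaf value produces a separate system named <system>_<value>, with
    # non-alphanumeric characters dropped from the value (e.g. 'p-xylene' -> 'pxylene').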
# Expected expanded script
expected_script = yank_load("""
systems:
system1_Abl: {receptor: Abl, ligand: p-xylene, solvent: GBSA-OBC2}
system1_T4Lysozyme: {receptor: T4Lysozyme, ligand: p-xylene, solvent: GBSA-OBC2}
system2_pxylene: {receptor: T4Lysozyme, ligand: p-xylene, solvent: GBSA-OBC2}
system2_toluene: {receptor: T4Lysozyme, ligand: toluene, solvent: GBSA-OBC2}
experiments:
system: !Combinatorial ['system1_Abl', 'system1_T4Lysozyme', 'system2_pxylene', 'system2_toluene']
protocol: absolute-binding
""")
expanded_script = template_script.copy()
expanded_script['systems'] = expected_script['systems']
expanded_script['experiments'] = expected_script['experiments']
assert ExperimentBuilder(template_script)._expand_systems(template_script) == expanded_script
def test_exp_sequence():
"""Test all experiments in a sequence are parsed."""
yaml_content = """
---
molecules:
rec:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
lig:
name: lig
leap: {{parameters: leaprc.gaff}}
solvents:
solv1:
nonbonded_method: NoCutoff
solv2:
nonbonded_method: PME
nonbonded_cutoff: 1*nanometer
clearance: 10*angstroms
protocols:{}
systems:
system1:
receptor: rec
ligand: lig
solvent: !Combinatorial [solv1, solv2]
system2:
receptor: rec
ligand: lig
solvent: solv1
experiment1:
system: system1
protocol: absolute-binding
experiment2:
system: system2
protocol: absolute-binding
experiments: [experiment1, experiment2]
""".format(examples_paths()['lysozyme'], standard_protocol)
exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))
assert len(exp_builder._experiments) == 2
# ==============================================================================
# Systems pipeline
# ==============================================================================
def test_setup_implicit_system_leap():
"""Create prmtop and inpcrd for implicit solvent protein-ligand system."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir)
exp_builder = ExperimentBuilder(yaml_content)
output_dir = os.path.dirname(
exp_builder._db.get_system('implicit-system')[0].position_path)
last_modified_path = os.path.join(output_dir, 'complex.prmtop')
last_modified = os.stat(last_modified_path).st_mtime
# Test that output files exist and there is no water
for phase in ['complex', 'solvent']:
found_resnames = set()
pdb_path = os.path.join(output_dir, phase + '.pdb')
prmtop_path = os.path.join(output_dir, phase + '.prmtop')
inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')
with open(pdb_path, 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
found_resnames.add(line[17:20])
assert os.path.exists(prmtop_path)
assert os.path.exists(inpcrd_path)
assert os.path.getsize(prmtop_path) > 0
assert os.path.getsize(inpcrd_path) > 0
assert 'MOL' in found_resnames
assert 'WAT' not in found_resnames
        # Test that another call does not regenerate the system
time.sleep(0.5) # st_mtime doesn't have much precision
exp_builder._db.get_system('implicit-system')
assert last_modified == os.stat(last_modified_path).st_mtime
def test_setup_explicit_system_leap():
"""Create prmtop and inpcrd protein-ligand system in explicit solvent."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir)
exp_builder = ExperimentBuilder(yaml_content)
output_dir = os.path.dirname(
exp_builder._db.get_system('explicit-system')[0].position_path)
# Test that output file exists and that there is water
expected_resnames = {'complex': set(['BEN', 'TOL', 'WAT']),
'solvent': set(['TOL', 'WAT'])}
for phase in expected_resnames:
found_resnames = set()
pdb_path = os.path.join(output_dir, phase + '.pdb')
prmtop_path = os.path.join(output_dir, phase + '.prmtop')
inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')
with open(pdb_path, 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
                        found_resnames.add(line[17:20])
assert os.path.exists(prmtop_path)
assert os.path.exists(inpcrd_path)
assert os.path.getsize(prmtop_path) > 0
assert os.path.getsize(inpcrd_path) > 0
assert found_resnames == expected_resnames[phase]
def test_neutralize_system():
"""Test whether the system charge is neutralized correctly."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir)
yaml_content['systems']['explicit-system']['receptor'] = 'T4Lysozyme'
yaml_content['systems']['explicit-system']['ligand'] = 'p-xylene'
exp_builder = ExperimentBuilder(yaml_content)
output_dir = os.path.dirname(
exp_builder._db.get_system('explicit-system')[0].position_path)
# Test that output file exists and that there are ions
found_resnames = set()
with open(os.path.join(output_dir, 'complex.pdb'), 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
found_resnames.add(line[17:20])
assert set(['MOL', 'WAT', 'Cl-']) <= found_resnames
# Check that parameter files exist
prmtop_path = os.path.join(output_dir, 'complex.prmtop')
inpcrd_path = os.path.join(output_dir, 'complex.inpcrd')
assert os.path.exists(prmtop_path)
assert os.path.exists(inpcrd_path)
def get_number_of_ions(exp_builder, phase, system_id):
"""Return number of ions in the system.
Parameters
----------
exp_builder : ExperimentBuilder
The experiment builder.
phase : str
complex, solvent, solvent1, solvent2.
system_id : str
The ID of the system.
Returns
-------
n_pos_ions : int
Number of positive ions in the system.
n_neg_ions : int
Number of negative ions in the system.
n_ionic_strength_ions : int
Expected number of ions needed to reach the desired ionic strength.
"""
    # Read in the output PDB file to count waters and ions.
if phase == 'complex' or phase == 'solvent1':
phase_id = 0
else:
phase_id = 1
system_filepath = exp_builder._db.get_system_files_paths(system_id)[phase_id].position_path
system_filepath = os.path.splitext(system_filepath)[0] + '.pdb'
system_traj = mdtraj.load(system_filepath)
# Count number of waters and ions.
n_waters = 0
n_pos_ions = 0
n_neg_ions = 0
for res in system_traj.topology.residues:
if res.is_water:
n_waters += 1
elif '+' in res.name:
n_pos_ions += 1
elif '-' in res.name:
n_neg_ions += 1
# Verify that number of ions roughly models the expected ionic strength.
try:
solvent_id = exp_builder._db.systems[system_id]['solvent']
except KeyError:
solvent_id = exp_builder._db.systems[system_id][phase] # solvent1 or solvent2
ionic_strength = exp_builder._db.solvents[solvent_id]['ionic_strength']
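    # Pure water is roughly 55.4 M, so the expected ion count is the number of water
    # molecules scaled by the ratio ionic_strength / 55.41 M.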
n_ionic_strength_ions = int(np.round(n_waters * ionic_strength / (55.41*unit.molar)))
return n_pos_ions, n_neg_ions, n_ionic_strength_ions
@unittest.skipIf(not utils.is_openeye_installed(), "This test requires OpenEye toolkit")
def test_charged_ligand():
"""Check that there are alchemical counterions for charged ligands."""
imatinib_path = examples_paths()['imatinib']
with mmtools.utils.temporary_directory() as tmp_dir:
# receptors = {'Asp': -1, 'Abl': -8} # receptor name -> net charge
# Only run `Asp` on CI as Abl can be very slow
receptors = {'Asp': -1} # receptor name -> net charge
solvent_names = ['PME', 'PMEionic']
updates = yank_load("""
molecules:
Asp:
name: "(3S)-3-amino-4-hydroxy-4-oxo-butanoate"
openeye: {{quacpac: am1-bcc}}
antechamber: {{charge_method: null}}
imatinib:
filepath: {}
openeye: {{quacpac: am1-bcc}}
antechamber: {{charge_method: null}}
explicit-system:
receptor: !Combinatorial {}
ligand: imatinib
pack: yes
solvent: !Combinatorial {}
""".format(imatinib_path, list(receptors.keys()), solvent_names))
yaml_content = get_template_script(tmp_dir, keep_openeye=True)
yaml_content['options']['resume_setup'] = True
yaml_content['molecules'].update(updates['molecules'])
yaml_content['solvents']['PMEionic'] = copy.deepcopy(yaml_content['solvents']['PME'])
yaml_content['solvents']['PMEionic']['ionic_strength'] = '10*millimolar'
yaml_content['systems']['explicit-system'].update(updates['explicit-system'])
exp_builder = ExperimentBuilder(yaml_content)
for receptor in receptors:
for solvent_name in solvent_names:
system_id = 'explicit-system_{}_{}'.format(receptor, solvent_name)
system_files_paths = exp_builder._db.get_system(system_id)
for i, phase_name in enumerate(['complex', 'solvent']):
inpcrd_file_path = system_files_paths[i].position_path
prmtop_file_path = system_files_paths[i].parameters_path
system, topology, _ = pipeline.read_system_files(
inpcrd_file_path, prmtop_file_path, {'nonbondedMethod': openmm.app.PME})
# Identify components.
if phase_name == 'complex':
alchemical_region = 'ligand_atoms'
topography = Topography(topology, ligand_atoms='resname MOL')
# Safety check: receptor must be negatively charged as expected
receptor_net_charge = pipeline.compute_net_charge(system,
topography.receptor_atoms)
assert receptor_net_charge == receptors[receptor]
else:
alchemical_region = 'solute_atoms'
topography = Topography(topology)
# There is a single ligand/solute counterion.
ligand_counterions = pipeline.find_alchemical_counterions(system, topography,
alchemical_region)
assert len(ligand_counterions) == 1
ion_idx = ligand_counterions[0]
ion_atom = next(itertools.islice(topology.atoms(), ion_idx, None))
assert '-' in ion_atom.residue.name
# In complex, there should be both ions even if the system is globally
# neutral (e.g. asp lys system), because of the alchemical ion.
n_pos_ions, n_neg_ions, n_ionic_strength_ions = get_number_of_ions(
exp_builder, phase=phase_name, system_id=system_id)
print(system_id, phase_name, n_ionic_strength_ions, n_pos_ions, n_neg_ions)
# Check correct number of ions.
if phase_name == 'complex':
n_neutralization_ions = abs(receptors[receptor])
if n_ionic_strength_ions > 0:
n_neutralization_ions -= 1 # we don't add an extra anion to alchemically modify
assert n_pos_ions == n_neutralization_ions + n_ionic_strength_ions
assert n_neg_ions == max(1, n_ionic_strength_ions)
else:
assert n_pos_ions == n_ionic_strength_ions
assert n_neg_ions == 1 + n_ionic_strength_ions
def test_ionic_strength():
"""The correct number of ions is added to achieve the requested ionic strength."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
assert yaml_script['systems']['hydration-system']['solute'] == 'toluene'
assert yaml_script['systems']['hydration-system']['solvent1'] == 'PME'
# Set ionic strength.
yaml_script['solvents']['PME']['ionic_strength'] = '200*millimolar'
# Set up toluene in explicit solvent.
exp_builder = ExperimentBuilder(yaml_script)
exp_builder._db.get_system('hydration-system')
n_pos_ions, n_neg_ions, expected_n_ions = get_number_of_ions(exp_builder, phase='solvent1',
system_id='hydration-system')
assert expected_n_ions > 0 # Otherwise this test doesn't make sense.
assert n_pos_ions == expected_n_ions, '{}, {}'.format(n_pos_ions, expected_n_ions)
assert n_neg_ions == expected_n_ions, '{}, {}'.format(n_neg_ions, expected_n_ions)
def test_setup_explicit_solvation_system():
"""Create prmtop and inpcrd files for solvation free energy in explicit solvent."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
del yaml_script['experiments']
exp_builder = ExperimentBuilder(yaml_script)
output_dir = os.path.dirname(
exp_builder._db.get_system('hydration-system')[0].position_path)
# Test that output file exists and that it has correct components
expected_resnames = {'solvent1': set(['TOL', 'WAT']), 'solvent2': set(['TOL'])}
for phase in expected_resnames:
found_resnames = set()
pdb_path = os.path.join(output_dir, phase + '.pdb')
prmtop_path = os.path.join(output_dir, phase + '.prmtop')
inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')
with open(pdb_path, 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
found_resnames.add(line[17:20])
assert os.path.exists(prmtop_path)
assert os.path.exists(inpcrd_path)
assert os.path.getsize(prmtop_path) > 0
assert os.path.getsize(inpcrd_path) > 0
assert found_resnames == expected_resnames[phase]
def test_setup_solvent_models():
"""Test the solvation with different solvent models works."""
with mmtools.utils.temporary_directory() as tmp_dir:
template_script = get_template_script(tmp_dir)
# Setup solvation system and reduce clearance to make test faster.
template_script['systems']['hydration-system']['solvent1'] = 'PME'
template_script['solvents']['PME']['clearance'] = '3.0 * angstrom'
del template_script['experiments']
# Test solvent models.
for solvent_model in ['tip3p', 'tip4pew', 'tip3pfb', 'tip5p']:
yaml_script = copy.deepcopy(template_script)
yaml_script['solvents']['PME']['solvent_model'] = solvent_model
if solvent_model == 'tip3p' or solvent_model == 'tip4pew':
solvent_parameters = ['leaprc.water.' + solvent_model]
else:
solvent_parameters = ['leaprc.water.tip3p', 'frcmod.' + solvent_model]
yaml_script['solvents']['PME']['leap']['parameters'] = solvent_parameters
yaml_script['options']['setup_dir'] = solvent_model
exp_builder = ExperimentBuilder(yaml_script)
# Infer number of expected atoms per water molecule from model.
expected_water_n_atoms = int(list(filter(str.isdigit, solvent_model))[0])
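            # e.g. 'tip3p' -> 3 atoms, 'tip4pew' -> 4 (includes the virtual site), 'tip5p' -> 5 (two lone-pair sites).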
# Setup the system and check that water residues have expected number of particles.
prmtop_filepath = exp_builder._db.get_system('hydration-system')[0].parameters_path
topology = mdtraj.load_prmtop(prmtop_filepath)
yield assert_equal, topology.residue(1).n_atoms, expected_water_n_atoms
def test_setup_multiple_parameters_system():
"""Set up system with molecule that needs many parameter files."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
# Force antechamber parametrization of benzene to output frcmod file
exp_builder = ExperimentBuilder(yaml_script)
exp_builder._db._setup_molecules('benzene')
benzene_dir = exp_builder._db.get_molecule_dir('benzene')
frcmod_path = os.path.join(benzene_dir, 'benzene.frcmod')
benzene_path = os.path.join(benzene_dir, 'benzene.gaff.mol2')
# Redefine benzene to use leaprc.gaff and benzene.frcmod
# and set up system for hydration free energy calculation
yaml_script['molecules'] = {
'benzene-frcmod': {'filepath': benzene_path,
'leap': {'parameters': ['leaprc.gaff', frcmod_path]}}}
yaml_script['systems'] = {
'system':
{'solute': 'benzene-frcmod', 'solvent1': 'PME', 'solvent2': 'vacuum',
'leap': {'parameters': 'oldff/leaprc.ff14SB'}}
}
del yaml_script['experiments']
exp_builder = ExperimentBuilder(yaml_script)
system_files_path = exp_builder._db.get_system('system')
# Check that output exist:
for phase in system_files_path:
assert os.path.exists(phase.parameters_path)
assert os.path.exists(phase.position_path)
assert os.path.getsize(phase.parameters_path) > 0
assert os.path.getsize(phase.position_path) > 0
# ==============================================================================
# Platform configuration tests
# ==============================================================================
def test_platform_configuration():
"""Test that the precision for platform is configured correctly."""
available_platforms = [openmm.Platform.getPlatform(i).getName()
for i in range(openmm.Platform.getNumPlatforms())]
for platform_name in available_platforms:
exp_builder = ExperimentBuilder(script='options: {}')
# Reference and CPU platform support only one precision model
if platform_name == 'Reference':
assert_raises(RuntimeError, exp_builder._configure_platform, platform_name, 'mixed')
continue
elif platform_name == 'CPU':
assert_raises(RuntimeError, exp_builder._configure_platform, platform_name, 'double')
continue
# Check that precision is set as expected
for precision in ['mixed', 'double', 'single']:
if platform_name == 'CUDA':
platform = exp_builder._configure_platform(platform_name=platform_name,
platform_precision=precision)
assert platform.getPropertyDefaultValue('CudaPrecision') == precision
assert platform.getPropertyDefaultValue('DeterministicForces') == 'true'
elif platform_name == 'OpenCL':
if ExperimentBuilder._opencl_device_support_precision(precision):
platform = exp_builder._configure_platform(platform_name=platform_name,
platform_precision=precision)
assert platform.getPropertyDefaultValue('OpenCLPrecision') == precision
else:
assert_raises(RuntimeError, exp_builder._configure_platform, platform_name, precision)
def test_default_platform_precision():
"""Test that the precision for platform is set to mixed by default."""
available_platforms = [openmm.Platform.getPlatform(i).getName()
for i in range(openmm.Platform.getNumPlatforms())]
# Determine whether this device OpenCL platform supports double precision
if 'OpenCL' in available_platforms:
opencl_support_double = ExperimentBuilder._opencl_device_support_precision('double')
for platform_name in available_platforms:
# Reference and CPU platform support only one precision model so we don't
# explicitly test them. We still call _configure_platform to be sure that
# precision 'auto' works
exp_builder = ExperimentBuilder(script='options: {}')
platform = exp_builder._configure_platform(platform_name=platform_name,
platform_precision='auto')
if platform_name == 'CUDA':
assert platform.getPropertyDefaultValue('CudaPrecision') == 'mixed'
elif platform_name == 'OpenCL':
if opencl_support_double:
assert platform.getPropertyDefaultValue('OpenCLPrecision') == 'mixed'
else:
assert platform.getPropertyDefaultValue('OpenCLPrecision') == 'single'
# ==============================================================================
# Experiment building
# ==============================================================================
class TestExperimentBuilding(object):
"""Test that options are passed correctly from YAML to the built objects."""
@classmethod
def get_implicit_template_script(cls, output_dir):
"""Return the template script with only an implicit system."""
template_script = get_template_script(output_dir)
# Remove systems we don't need to setup.
del template_script['systems']['explicit-system']
del template_script['systems']['hydration-system']
template_script['experiments']['system'] = 'implicit-system'
return template_script
def check_constructor(self, yaml_script, constructor_description, object_name,
complex_phase_only=False, special_check_func=None):
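        """Build the experiments and check the object reachable at ``object_name`` on each phase.

        The 'type' entry of ``constructor_description`` is compared against the object's class
        name; every other entry is compared against the attribute of the same name (converting
        strings to quantities when possible). If given, ``special_check_func(phase, description)``
        runs additional checks.
        """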
exp_builder = ExperimentBuilder(script=yaml_script)
for experiment in exp_builder.build_experiments():
phases = experiment.phases
if complex_phase_only:
phases = [phases[0]]
for phase in phases:
for k, v in constructor_description.items():
# Convert constructor strings to quantities if necessary.
try:
v = utils.quantity_from_string(v)
                    except Exception:
pass
# Obtain instantiated object.
object_instance = phase
for name in object_name.split('.'):
object_instance = getattr(object_instance, name)
# Check class and attributes.
if k == 'type':
assert object_instance.__class__.__name__ == v
else:
assert getattr(object_instance, k) == v
if special_check_func is not None:
special_check_func(phase, constructor_description)
def test_alchemical_phase_factory_building(self):
"""Test that options are passed to AlchemicalPhaseFactory correctly."""
with mmtools.utils.temporary_directory() as tmp_dir:
template_script = self.get_implicit_template_script(tmp_dir)
# AbsoluteAlchemicalFactory options.
template_script['options']['alchemical_pme_treatment'] = 'exact'
# Test that options are passed to AlchemicalPhaseFactory correctly.
exp_builder = ExperimentBuilder(script=template_script)
for experiment in exp_builder.build_experiments():
for phase_factory in experiment.phases:
assert phase_factory.alchemical_factory.alchemical_pme_treatment == 'exact'
# Overwrite AbsoluteAlchemicalFactory default for disable_alchemical_dispersion_correction.
                    assert phase_factory.alchemical_factory.disable_alchemical_dispersion_correction is True
def test_restraint_building(self):
"""Test that experiment restraints are built correctly."""
with mmtools.utils.temporary_directory() as tmp_dir:
template_script = self.get_implicit_template_script(tmp_dir)
# Restraint options.
template_script['experiments']['restraint'] = {
'type': 'Harmonic',
'restrained_receptor_atoms': [10, 11, 12],
'restrained_ligand_atoms': 'resname MOL',
'spring_constant': '8*kilojoule_per_mole/nanometers**2'
}
# Test that options are passed to the restraint correctly.
constructor_description = template_script['experiments']['restraint']
self.check_constructor(template_script, constructor_description,
object_name='restraint', complex_phase_only=True)
def test_sampler_building(self):
"""Test that the experiment sampler is built correctly."""
with mmtools.utils.temporary_directory() as tmp_dir:
template_script = self.get_implicit_template_script(tmp_dir)
template_script['options']['resume_setup'] = True
default_number_of_iterations = template_script['options']['default_number_of_iterations']
# Add tested samplers.
template_script['samplers'] = {
'my-sampler1': {
'type': 'ReplicaExchangeSampler',
'number_of_iterations': 9,
'replica_mixing_scheme': 'swap-neighbors',
},
'my-sampler2': {
'type': 'MultiStateSampler',
'locality': 5
}
}
def check_default_number_of_iterations(phase, sampler_description):
if 'number_of_iterations' not in sampler_description:
assert phase.sampler.number_of_iterations == default_number_of_iterations
# Test that options are passed to the sampler correctly.
for sampler_id, sampler_description in template_script['samplers'].items():
template_script['experiments']['sampler'] = sampler_id
constructor_description = template_script['samplers'][sampler_id]
yield (self.check_constructor, template_script, constructor_description,
'sampler', None, check_default_number_of_iterations)
def test_mcmc_move_building(self):
"""Test that the experiment MCMCMoves are built correctly."""
with mmtools.utils.temporary_directory() as tmp_dir:
template_script = self.get_implicit_template_script(tmp_dir)
template_script['options']['resume_setup'] = True
template_script['experiments']['sampler'] = 'repex'
print(template_script['samplers'])
            # Add tested MCMC moves.
template_script['mcmc_moves'] = {
'my-move1': {
'type': 'LangevinSplittingDynamicsMove',
'reassign_velocities': False,
'splitting': 'RVOVR',
'n_steps': 10,
'timestep': '2.0*femtosecond'
},
'my-move2': {'type': 'SequenceMove', 'move_list': [
{'type': 'MCDisplacementMove', 'displacement_sigma': '5.0*nanometers'},
{'type': 'LangevinDynamicsMove'}
]}
}
# Test default MCMCMove.
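            # For phases that contain a ligand, the default is a SequenceMove of
            # (MCDisplacementMove, MCRotationMove, LangevinSplittingDynamicsMove) acting on the
            # ligand; solvent-only phases get just the Langevin move.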
exp_builder = ExperimentBuilder(script=template_script)
for experiment in exp_builder.build_experiments():
for phase in experiment.phases:
mcmc_move = phase.sampler.mcmc_moves
if len(phase.topography.ligand_atoms) > 0:
assert type(mcmc_move) is mmtools.mcmc.SequenceMove
assert len(mcmc_move.move_list) == 3
assert type(mcmc_move.move_list[0]) is mmtools.mcmc.MCDisplacementMove
assert type(mcmc_move.move_list[1]) is mmtools.mcmc.MCRotationMove
assert mcmc_move.move_list[0].atom_subset == phase.topography.ligand_atoms
langevin_move = mcmc_move.move_list[2]
else:
langevin_move = mcmc_move
# Check default parameters LangevinMove
assert type(langevin_move) is mmtools.mcmc.LangevinSplittingDynamicsMove
assert langevin_move.timestep == exp_builder._options['default_timestep']
assert langevin_move.n_steps == exp_builder._options['default_nsteps_per_iteration']
# Test that custom MCMCMoves are built correctly.
template_script['samplers']['repex']['mcmc_moves'] = 'my-move1'
constructor_description = template_script['mcmc_moves']['my-move1']
self.check_constructor(template_script, constructor_description,
object_name='sampler.mcmc_moves')
template_script['samplers']['repex']['mcmc_moves'] = 'my-move2'
exp_builder = ExperimentBuilder(script=template_script)
for experiment in exp_builder.build_experiments():
for phase in experiment.phases:
mcmc_move = phase.sampler.mcmc_moves
assert type(mcmc_move) is mmtools.mcmc.SequenceMove
assert len(mcmc_move.move_list) == 2
assert type(mcmc_move.move_list[0]) is mmtools.mcmc.MCDisplacementMove
assert type(mcmc_move.move_list[1]) is mmtools.mcmc.LangevinDynamicsMove
assert mcmc_move.move_list[0].displacement_sigma == 5.0*unit.nanometers
# ==============================================================================
# Experiment execution
# ==============================================================================
def test_expand_experiments():
"""Test that job_id and n_jobs limit the number of experiments run."""
template_script = get_template_script()
experiment_systems = utils.CombinatorialLeaf(['explicit-system', 'implicit-system', 'hydration-system'])
template_script['experiments']['system'] = experiment_systems
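    # The CombinatorialLeaf expands to 3 experiments; with n_jobs=2, job 1 should get
    # 2 of them and job 2 the remaining 1.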
exp_builder = ExperimentBuilder(script=template_script, job_id=1, n_jobs=2)
experiments = list(exp_builder._expand_experiments())
assert len(experiments) == 2
exp_builder = ExperimentBuilder(script=template_script, job_id=2, n_jobs=2)
experiments = list(exp_builder._expand_experiments())
assert len(experiments) == 1
def test_yaml_creation():
"""Test the content of generated single experiment YAML files."""
ligand_path = examples_paths()['p-xylene']
toluene_path = examples_paths()['toluene']
with mmtools.utils.temporary_directory() as tmp_dir:
molecules = """
T4lysozyme:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}""".format(examples_paths()['lysozyme'])
solvent = """
vacuum:
nonbonded_method: NoCutoff"""
protocol = indent(standard_protocol)
system = """
system:
ligand: p-xylene
receptor: T4lysozyme
solvent: vacuum"""
experiment = """
protocol: absolute-binding
system: system"""
yaml_content = """
---
options:
output_dir: {}
molecules:{}
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
benzene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:{}
GBSA-OBC2:
nonbonded_method: NoCutoff
implicit_solvent: OBC2
systems:{}
protocols:{}
experiments:{}
""".format(os.path.relpath(tmp_dir), molecules,
os.path.relpath(ligand_path), toluene_path,
solvent, system, protocol, experiment)
# We need to check whether the relative paths to the output directory and
# for p-xylene are handled correctly while absolute paths (T4lysozyme) are
# left untouched
expected_yaml_content = textwrap.dedent("""
---
version: '{}'
options:
experiments_dir: .
output_dir: .
molecules:{}
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:{}
systems:{}
protocols:{}
experiments:{}
""".format(HIGHEST_VERSION, molecules, os.path.relpath(ligand_path, tmp_dir),
solvent, system, protocol, experiment))
expected_yaml_content = expected_yaml_content[1:] # remove first '\n'
exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))
        # During setup the molecule's fields may be modified, so we need to check
        # that this doesn't affect the exported YAML file.
experiment_dict = yaml.load(experiment, Loader=yaml.FullLoader)
exp_builder._db.get_system(experiment_dict['system'])
generated_yaml_path = os.path.join(tmp_dir, 'experiment.yaml')
exp_builder._generate_yaml(experiment_dict, generated_yaml_path)
with open(generated_yaml_path, 'r') as f:
assert yaml.load(f, Loader=yaml.FullLoader) == yank_load(expected_yaml_content)
def test_yaml_extension():
"""Test that extending a yaml content with additional data produces the correct fusion"""
ligand_path = examples_paths()['p-xylene']
toluene_path = examples_paths()['toluene']
with mmtools.utils.temporary_directory() as tmp_dir:
molecules = """
T4lysozyme:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}""".format(examples_paths()['lysozyme'])
solvent = """
vacuum:
nonbonded_method: NoCutoff"""
protocol = indent(standard_protocol)
system = """
system:
ligand: p-xylene
receptor: T4lysozyme
solvent: vacuum"""
experiment = """
protocol: absolute-binding
system: system"""
num_iterations = 5
        replacement_solvent = "HCT"
yaml_content = """
---
options:
output_dir: {}
molecules:{}
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
benzene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:{}
GBSA-OBC2:
nonbonded_method: NoCutoff
implicit_solvent: OBC2
systems:{}
protocols:{}
experiments:{}
""".format(os.path.relpath(tmp_dir), molecules,
os.path.relpath(ligand_path), toluene_path,
solvent, system, protocol, experiment)
yaml_extension = """
options:
default_number_of_iterations: {}
solvents:
GBSA-OBC2:
                implicit_solvent: {}
""".format(num_iterations, replacement_solvent)
# We need to check whether the relative paths to the output directory and
# for p-xylene are handled correctly while absolute paths (T4lysozyme) are
# left untouched
expected_yaml_content = textwrap.dedent("""
---
version: '{}'
options:
experiments_dir: .
output_dir: .
default_number_of_iterations: {}
molecules:{}
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:{}
systems:{}
protocols:{}
experiments:{}
""".format(HIGHEST_VERSION, num_iterations, molecules, os.path.relpath(ligand_path, tmp_dir),
solvent, system, protocol, experiment))
expected_yaml_content = expected_yaml_content[1:] # remove first '\n'
exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))
exp_builder.update_yaml(yaml_extension)
# during setup we can modify molecule's fields, so we need
# to check that it doesn't affect the YAML file exported
experiment_dict = yaml.load(experiment, Loader=yaml.FullLoader)
exp_builder._db.get_system(experiment_dict['system'])
generated_yaml_path = os.path.join(tmp_dir, 'experiment.yaml')
exp_builder._generate_yaml(experiment_dict, generated_yaml_path)
with open(generated_yaml_path, 'r') as f:
assert yaml.load(f, Loader=yaml.FullLoader) == yank_load(expected_yaml_content)
@attr('slow') # Skip on Travis-CI
def test_run_experiment_from_amber_files():
"""Test experiment run from prmtop/inpcrd files."""
complex_path = examples_paths()['bentol-complex']
solvent_path = examples_paths()['bentol-solvent']
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
yaml_script['options']['anisotropic_dispersion_cutoff'] = None
del yaml_script['molecules'] # we shouldn't need any molecule
del yaml_script['solvents']['PME']['clearance'] # we shouldn't need this
yaml_script['systems'] = {'explicit-system':
{'phase1_path': complex_path, 'phase2_path': solvent_path,
'ligand_dsl': 'resname TOL', 'solvent': 'PME'}}
exp_builder = ExperimentBuilder(yaml_script)
exp_builder._check_resume() # check_resume should not raise exceptions
exp_builder.run_experiments()
# The experiments folders are correctly named and positioned
output_dir = exp_builder._get_experiment_dir('')
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f, Loader=yaml.FullLoader) == [['complex', 1], ['solvent', -1]]
@attr('slow') # Skip on Travis-CI
def test_run_experiment_from_gromacs_files():
"""Test experiment run from top/gro files."""
complex_path = examples_paths()['pxylene-complex']
solvent_path = examples_paths()['pxylene-solvent']
include_path = examples_paths()['pxylene-gro-include']
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
yaml_script['options']['anisotropic_dispersion_cutoff'] = None
del yaml_script['molecules'] # we shouldn't need any molecule
yaml_script['systems'] = {'explicit-system':
{'phase1_path': complex_path, 'phase2_path': solvent_path,
'ligand_dsl': 'resname "p-xylene"', 'solvent': 'PME',
'gromacs_include_dir': include_path}}
yaml_script['experiments']['system'] = 'explicit-system'
exp_builder = ExperimentBuilder(yaml_script)
exp_builder._check_resume() # check_resume should not raise exceptions
exp_builder.run_experiments()
# The experiments folders are correctly named and positioned
output_dir = exp_builder._get_experiment_dir('')
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f, Loader=yaml.FullLoader) == [['complex', 1], ['solvent', -1]]
@attr('slow') # Skip on Travis-CI
def test_run_experiment_from_xml_files():
"""Test hydration experiment run from pdb/xml files."""
solvent_path = examples_paths()['toluene-solvent']
vacuum_path = examples_paths()['toluene-vacuum']
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
del yaml_script['molecules'] # we shouldn't need any molecule
yaml_script['systems'] = {'explicit-system':
{'phase1_path': solvent_path, 'phase2_path': vacuum_path,
'solvent_dsl': 'not resname TOL'}}
exp_builder = ExperimentBuilder(yaml_script)
exp_builder._check_resume() # check_resume should not raise exceptions
exp_builder.run_experiments()
# The experiments folders are correctly named and positioned
output_dir = exp_builder._get_experiment_dir('')
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f, Loader=yaml.FullLoader) == [['complex', 1], ['solvent', -1]]
@attr('slow') # Skip on Travis-CI
def test_run_experiment():
"""Test experiment run and resuming."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_content = """
---
options:
resume_setup: no
resume_simulation: no
default_number_of_iterations: 0
output_dir: {}
setup_dir: ''
experiments_dir: ''
minimize: no
annihilate_sterics: yes
molecules:
T4lysozyme:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
select: 0
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:
vacuum:
nonbonded_method: NoCutoff
GBSA-OBC2:
nonbonded_method: NoCutoff
implicit_solvent: OBC2
protocols:{}
systems:
system:
receptor: T4lysozyme
ligand: p-xylene
solvent: !Combinatorial [vacuum, GBSA-OBC2]
experiments:
system: system
protocol: absolute-binding
restraint:
type: FlatBottom
spring_constant: 0.6*kilocalorie_per_mole/angstroms**2
well_radius: 5.2*nanometers
restrained_receptor_atoms: 1644
restrained_ligand_atoms: 2609
options:
temperature: 302.0*kelvin
""".format(tmp_dir, examples_paths()['lysozyme'], examples_paths()['p-xylene'],
indent(standard_protocol))
exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))
        # At this point _check_resume should not raise exceptions
exp_builder._check_resume()
        # We manually set up a molecule; with resume_setup: no, running the experiment must now fail
err_msg = ''
exp_builder._options['resume_setup'] = False
exp_builder._db._setup_molecules('p-xylene')
try:
exp_builder.run_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'molecule' in err_msg
# Same thing with a system
err_msg = ''
system_dir = os.path.dirname(
exp_builder._db.get_system('system_GBSAOBC2')[0].position_path)
try:
exp_builder.run_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'system' in err_msg
# Now we set resume_setup to True and things work
exp_builder._options['resume_setup'] = True
ligand_dir = exp_builder._db.get_molecule_dir('p-xylene')
frcmod_file = os.path.join(ligand_dir, 'p-xylene.frcmod')
prmtop_file = os.path.join(system_dir, 'complex.prmtop')
molecule_last_touched = os.stat(frcmod_file).st_mtime
system_last_touched = os.stat(prmtop_file).st_mtime
exp_builder.run_experiments()
# Neither the system nor the molecule has been processed again
assert molecule_last_touched == os.stat(frcmod_file).st_mtime
assert system_last_touched == os.stat(prmtop_file).st_mtime
# The experiments folders are correctly named and positioned
for exp_name in ['systemvacuum', 'systemGBSAOBC2']:
# The output directory must be the one in the experiment section
output_dir = os.path.join(tmp_dir, exp_name)
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, exp_name + '.yaml'))
assert os.path.isfile(os.path.join(output_dir, exp_name + '.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f, Loader=yaml.FullLoader) == [['complex', 1], ['solvent', -1]]
# Now we can't run the experiment again with resume_simulation: no
exp_builder._options['resume_simulation'] = False
try:
exp_builder.run_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'experiment' in err_msg
# We set resume_simulation: yes and now things work
exp_builder._options['resume_simulation'] = True
exp_builder.run_experiments()
def solvation_stock(tmp_dir, overwrite_options=None):
"""Stock actions to take for a solvation run"""
yaml_script = get_template_script(tmp_dir)
yaml_script['experiments']['system'] = 'hydration-system'
yaml_script['experiments']['protocol'] = 'hydration-protocol'
# Pop out all non-hydration system items for setup speed
molecule_poppers = []
for molecule in yaml_script['molecules'].keys():
if molecule not in yaml_script['systems']['hydration-system'].values():
molecule_poppers.append(molecule)
for molecule in molecule_poppers:
yaml_script['molecules'].pop(molecule, None)
system_poppers = []
for system in yaml_script['systems'].keys():
if system != 'hydration-system':
system_poppers.append(system)
for system in system_poppers:
yaml_script['systems'].pop(system, None)
if overwrite_options is not None:
yaml_script = utils.update_nested_dict(yaml_script, overwrite_options)
exp_builder = ExperimentBuilder(yaml_script)
exp_builder._check_resume() # check_resume should not raise exceptions
exp_builder.run_experiments()
return yaml_script, exp_builder
def test_run_solvation_experiment():
"""Test solvation free energy experiment run."""
with mmtools.utils.temporary_directory() as tmp_dir:
_, exp_builder = solvation_stock(tmp_dir)
# The experiments folders are correctly named and positioned
output_dir = exp_builder._get_experiment_dir('')
assert os.path.isdir(output_dir)
for solvent in ['solvent1.nc', 'solvent2.nc']:
solvent_path = os.path.join(output_dir, solvent)
reporter = mmtools.multistate.MultiStateReporter(solvent_path, open_mode=None)
assert reporter.storage_exists()
del reporter
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f, Loader=yaml.FullLoader) == [['solvent1', 1], ['solvent2', -1]]
class TestTrailblazeAlchemicalPath:
"""Test suite for the automatic discretization of the alchemical path."""
def _get_hydration_free_energy_script(self, tmp_dir, alchemical_path='auto', trailblazer_options=None):
# Setup only 1 hydration free energy system in implicit solvent and vacuum.
yaml_script = get_template_script(tmp_dir, systems=['hydration-system'])
yaml_script['systems']['hydration-system']['solvent1'] = 'GBSA-OBC2'
yaml_script['experiments']['system'] = 'hydration-system'
yaml_script['experiments']['protocol'] = 'hydration-protocol'
# We run trailblaze only for the phase in vacuum.
yaml_script['protocols']['hydration-protocol']['solvent2']['alchemical_path'] = alchemical_path
if trailblazer_options is not None:
yaml_script['protocols']['hydration-protocol']['solvent2']['trailblazer_options'] = trailblazer_options
# Make the generation of the trailblaze samples inexpensive.
yaml_script['mcmc_moves']['single']['n_steps'] = 1
yaml_script['mcmc_moves']['single']['timestep'] = '0.5*femtosecond'
yaml_script['samplers']['repex']['mcmc_moves'] = 'single'
yaml_script['experiments']['sampler'] = 'repex'
yaml_script['options']['platform'] = 'CPU'
return yaml_script
def test_generate_lambda_alchemical_function(self):
"""The alchemical functions generated by ExperimentBuilder._generate_lambda_alchemical_function are correct."""
from openmmtools.utils import math_eval
def evaluate(expr, l):
variables = {'lambda': l}
return math_eval(expr, variables)
        # Each test case is a pair (lambda0, lambda1); the generated function is
        # expected to map lambda0 -> 0.0, the midpoint -> 0.5 and lambda1 -> 1.0.
test_cases = [(0, 1), (1, 0), (2, 3), (3, 2), (4, 8), (10, 5)]
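        # For example, for (lambda0, lambda1) = (4, 8) the generated expression must
        # satisfy f(4) = 0.0, f(6) = 0.5 and f(8) = 1.0, and stay constant outside
        # the [4, 8] interval (both properties are checked below).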
for lambda0, lambda1 in test_cases:
expr = ExperimentBuilder._generate_lambda_alchemical_function(lambda0, lambda1)
print(lambda0, lambda1, ':', expr)
assert evaluate(expr, lambda0) == 0.0
assert evaluate(expr, (lambda0 + lambda1)/2) == 0.5
assert evaluate(expr, lambda1) == 1.0
            # The function must be constant after the end states.
if lambda0 < lambda1:
assert evaluate(expr, lambda0-1) == 0.0
assert evaluate(expr, lambda1+1) == 1.0
else:
assert evaluate(expr, lambda0+1) == 0.0
assert evaluate(expr, lambda1-1) == 1.0
def test_determine_trailblaze_path(self):
"""Test the various paths generated by ExperimentBuilder._determine_trailblaze_path with alchemical functions"""
# Mock objects to test the static function.
class MockTopography(typing.NamedTuple):
receptor_atoms: typing.List
solvent_atoms: typing.List
class MockAlchemicalRegions(typing.NamedTuple):
annihilate_electrostatics: typing.List
annihilate_sterics: typing.List
class MockPhaseFactory:
def __init__(self, restraint, is_vacuum, annihilate_electrostatics, annihilate_sterics):
self.restraint = restraint
if is_vacuum:
self.topography = MockTopography([], [])
else:
self.topography = MockTopography([0], [1])
self.alchemical_regions = MockAlchemicalRegions(annihilate_electrostatics, annihilate_sterics)
# Each test case is (mock_phase_factory_init_kwargs, expected_alchemical_functions, expected_state_parameters).
p = ExperimentBuilder._generate_lambda_alchemical_function # Shortcut.
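        # Reading the expected functions below: p(a, b) produces an expression of the
        # single 'lambda' variable that is 0 at lambda=a, 1 at lambda=b, and clamped
        # outside that segment, so consecutive segments of the global lambda path
        # switch one alchemical parameter at a time (e.g. in the first test case the
        # restraint over [3, 2], electrostatics over [2, 1], and sterics over [1, 0]).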
test_cases = [
(
dict(restraint=True, is_vacuum=False, annihilate_electrostatics=True, annihilate_sterics=False),
dict(lambda_restraints=p(3, 2), lambda_electrostatics=p(1, 2), lambda_sterics=p(0, 1)),
[('lambda', [3.0, 0.0])]
),
(
dict(restraint=None, is_vacuum=False, annihilate_electrostatics=True, annihilate_sterics=False),
dict(lambda_electrostatics=p(1, 2), lambda_sterics=p(0, 1)),
[('lambda', [2.0, 0.0])]
),
(
dict(restraint=None, is_vacuum=True, annihilate_electrostatics=True, annihilate_sterics=False),
dict(lambda_electrostatics=p(0, 1), lambda_sterics='1.0'),
[('lambda', [1.0, 0.0])]
),
(
dict(restraint=True, is_vacuum=True, annihilate_electrostatics=False, annihilate_sterics=False),
dict(lambda_restraints=p(1, 0), lambda_electrostatics='1.0', lambda_sterics='1.0'),
[('lambda', [1.0, 0.0])]
),
(
dict(restraint=restraints.RMSD(), is_vacuum=False, annihilate_electrostatics=True, annihilate_sterics=False),
dict(lambda_restraints=p(4, 3) + ' - (' + p(1, 0) + ')', lambda_electrostatics=p(2, 3), lambda_sterics=p(1, 2)),
[('lambda', [4.0, 0.0])]
),
]
for idx, (phase_factory_kwargs, expected_alchemical_functions, expected_state_parameters) in enumerate(test_cases):
phase_factory = MockPhaseFactory(**phase_factory_kwargs)
alchemical_functions, states_parameters = ExperimentBuilder._determine_trailblaze_path(
phase_factory, alchemical_path='auto', generate_alchemical_functions=True)
# Convert alchemical functions objects to string expressions for comparison.
for parameter_name, alchemical_function in alchemical_functions.items():
alchemical_functions[parameter_name] = alchemical_function._expression
err_msg = 'test case ' + str(idx) + ':\n\nExpected:\n{}\n\nObtained:\n{}'
assert alchemical_functions == expected_alchemical_functions, err_msg.format(pformat(expected_alchemical_functions),
pformat(alchemical_functions))
assert states_parameters == expected_state_parameters, err_msg.format(pformat(expected_state_parameters),
pformat(states_parameters))
def test_auto_alchemical_path(self):
"""Test automatic alchemical path found by thermodynamic trailblazing when the option 'auto' is set."""
with mmtools.utils.temporary_directory() as tmp_dir:
# Setup only 1 hydration free energy system in implicit solvent and vacuum.
yaml_script = self._get_hydration_free_energy_script(
tmp_dir, alchemical_path='auto',
trailblazer_options={'n_equilibration_iterations': 0}
)
yaml_script['options']['resume_setup'] = False
yaml_script['options']['resume_simulation'] = False
exp_builder = ExperimentBuilder(yaml_script)
# ExperimentBuilder._get_experiment_protocol handles dummy protocols.
experiment_path, experiment_description = next(exp_builder._expand_experiments())
with assert_raises(FileNotFoundError):
exp_builder._get_experiment_protocol(experiment_path, experiment_description)
dummy_protocol = exp_builder._get_experiment_protocol(experiment_path, experiment_description,
use_dummy_protocol=True)
assert dummy_protocol['solvent2']['alchemical_path'] == {} # This is the dummy protocol.
# check_resume should not raise exceptions at this point.
exp_builder._check_resume()
# Building the experiment should generate the alchemical path.
for experiment in exp_builder.build_experiments():
pass
# The experiment has the correct path. Only the path of solvent2 has been generated.
expected_generated_protocol = {
'lambda_electrostatics': [1.0, 0.0],
'lambda_sterics': [1.0, 1.0]
}
assert experiment.phases[0].protocol == yaml_script['protocols']['hydration-protocol']['solvent1']['alchemical_path']
assert experiment.phases[1].protocol == expected_generated_protocol
# Resuming fails at this point because we have
# generated the YAML file containing the protocol.
with assert_raises(YamlParseError):
next(exp_builder.build_experiments())
# When resuming, ExperimentBuilder should recycle the path from the previous run.
generated_yaml_script_path = exp_builder._get_generated_yaml_script_path('')
last_touched_yaml = os.stat(generated_yaml_script_path).st_mtime
exp_builder._options['resume_setup'] = True
exp_builder._options['resume_simulation'] = True
exp_builder.run_experiments()
assert last_touched_yaml == os.stat(generated_yaml_script_path).st_mtime
def test_start_from_trailblaze_samples_path(self):
"""Test the correct implementation of the option start_from_trailblaze_samples."""
with mmtools.utils.temporary_directory() as tmp_dir:
# Setup only 1 hydration free energy system in implicit solvent and vacuum.
yaml_script = self._get_hydration_free_energy_script(
tmp_dir, alchemical_path='auto',
trailblazer_options={'n_equilibration_iterations': 0}
)
exp_builder = ExperimentBuilder(yaml_script)
# YANK by default takes advantage of the samples generated during the trailblaze.
for experiment in exp_builder.build_experiments():
pass
assert isinstance(experiment.phases[0].sampler_states, mmtools.states.SamplerState)
trailblaze_sampler_states = experiment.phases[1].sampler_states
assert len(trailblaze_sampler_states) == 2
# Unless the option start_from_trailblaze_samples is False.
exp_builder._options['start_from_trailblaze_samples'] = False
for experiment in exp_builder.build_experiments():
pass
input_sampler_state = experiment.phases[1].sampler_states
assert isinstance(input_sampler_state, mmtools.states.SamplerState)
assert not np.allclose(trailblaze_sampler_states[0].positions, input_sampler_state.positions)
assert not np.allclose(trailblaze_sampler_states[1].positions, input_sampler_state.positions)
def test_alchemical_functions_path(self):
"""Test automatic alchemical path found from alchemical functions."""
with mmtools.utils.temporary_directory() as tmp_dir:
# Setup only 1 hydration free energy system in implicit solvent and vacuum.
yaml_script = self._get_hydration_free_energy_script(
tmp_dir,
alchemical_path={'lambda_electrostatics': 'lambda',
'lambda_sterics': 'lambda',
'lambda': [0.0, 1.0]},
trailblazer_options={'function_variable_name': 'lambda',
'n_equilibration_iterations': 0}
)
exp_builder = ExperimentBuilder(yaml_script)
# Building the experiment should generate the alchemical path.
for experiment in exp_builder.build_experiments():
pass
# The experiment has the correct path. Only the path of solvent2 has been generated.
expected_generated_protocol = {
'lambda_electrostatics': [0.0, 1.0],
'lambda_sterics': [0.0, 1.0]
}
assert experiment.phases[0].protocol == yaml_script['protocols']['hydration-protocol']['solvent1']['alchemical_path']
assert experiment.phases[1].protocol == expected_generated_protocol, experiment.phases[1].protocol
# YANK takes advantage of the samples generated during the trailblaze.
assert isinstance(experiment.phases[0].sampler_states, mmtools.states.SamplerState)
assert len(experiment.phases[1].sampler_states) == 2
def test_experiment_nan():
"""Test that eventual NaN's are handled and that experiment is signal as completed."""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_script = get_functionality_script(output_directory=tmp_dir, experiment_repeats=0, number_nan_repeats=1)
exp_builder = ExperimentBuilder(script=yaml_script)
with moltools.utils.temporary_cd(exp_builder._script_dir):
exp_builder._check_resume()
exp_builder._setup_experiments()
exp_builder._generate_experiments_protocols()
for experiment in exp_builder._expand_experiments():
is_completed = exp_builder._run_experiment(experiment)
assert is_completed
def test_multi_experiment_nan():
"""Test that no one experiment going NaN crashes the simulation"""
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_script = get_functionality_script(output_directory=tmp_dir,
number_of_iter=2,
experiment_repeats=2,
number_nan_repeats=2)
exp_builder = ExperimentBuilder(yaml_script)
# This should run correctly and not raise errors
exp_builder.run_experiments()
if __name__ == '__main__':
test_run_solvation_experiment()
| repo: Project-MONAI/MONAI | branch: refs/heads/dev | path: /tests/test_deepgrow_transforms.py |
| license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 16,313 bytes |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import numpy as np
from parameterized import parameterized
from monai.apps.deepgrow.transforms import (
AddGuidanceFromPointsd,
AddGuidanceSignald,
AddInitialSeedPointd,
AddRandomGuidanced,
Fetch2DSliced,
FindAllValidSlicesd,
FindDiscrepancyRegionsd,
ResizeGuidanced,
RestoreLabeld,
SpatialCropForegroundd,
SpatialCropGuidanced,
)
from monai.utils.enums import PostFix
IMAGE = np.array([[[[1, 0, 2, 0, 1], [0, 1, 2, 1, 0], [2, 2, 3, 2, 2], [0, 1, 2, 1, 0], [1, 0, 2, 0, 1]]]])
LABEL = np.array([[[[0, 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 0, 0, 0, 0]]]])
DATA_1 = {"image": IMAGE, "label": LABEL, PostFix.meta("image"): {}, PostFix.meta("label"): {}}
DATA_2 = {
"image": np.array(
[
[
[[1, 2, 3, 2, 1], [1, 1, 3, 2, 1], [0, 0, 0, 0, 0], [1, 1, 1, 2, 1], [0, 2, 2, 2, 1]],
[[1, 0, 2, 0, 1], [0, 1, 2, 1, 0], [2, 2, 3, 2, 2], [0, 1, 2, 1, 0], [1, 0, 2, 0, 1]],
]
]
),
"label": np.array(
[
[
[[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]],
[[0, 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 0, 0, 0, 0]],
]
]
),
"guidance": np.array([[[1, 0, 2, 2], [1, 1, 2, 2]], [[-1, -1, -1, -1], [-1, -1, -1, -1]]]),
}
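# The "guidance" array in DATA_2 (and the similar ones below) stores the positive
# (foreground) clicks in its first element and the negative (background) clicks in its
# second, with rows of -1 padding unused click slots.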
DATA_3 = {
"image": IMAGE,
"label": LABEL,
"pred": np.array([[[[0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 1, 0, 0], [0, 1, 0, 1, 0], [0, 0, 0, 0, 0]]]]),
}
DATA_4 = {
"image": IMAGE,
"label": LABEL,
"guidance": np.array([[[1, 0, 2, 2]], [[-1, -1, -1, -1]]]),
"discrepancy": np.array(
[
[[[[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
]
),
"probability": 1.0,
}
DATA_5 = {
"image": np.arange(25).reshape((1, 5, 5)),
PostFix.meta("image"): {"spatial_shape": [5, 5, 1]},
"foreground": [[2, 2, 0]],
"background": [],
}
DATA_6 = {
"image": np.arange(25).reshape((1, 5, 5)),
PostFix.meta("image"): {"spatial_shape": [5, 2, 1]},
"foreground": [[2, 1, 0]],
"background": [[1, 0, 0]],
}
DATA_7 = {
"image": np.arange(500).reshape((5, 10, 10)),
PostFix.meta("image"): {"spatial_shape": [20, 20, 10]},
"foreground": [[10, 14, 6], [10, 14, 8]],
"background": [[10, 16, 8]],
"slice": 6,
}
DATA_8 = {
"image": np.arange(500).reshape((1, 5, 10, 10)),
PostFix.meta("image"): {"spatial_shape": [20, 20, 10]},
"guidance": [[[3, 5, 7], [4, 5, 7]], [[4, 5, 8]]],
}
DATA_9 = {
"image": np.arange(1000).reshape((1, 5, 10, 20)),
PostFix.meta("image"): {"foreground_cropped_shape": (1, 10, 20, 40)},
"guidance": [[[6, 10, 14], [8, 10, 14]], [[8, 10, 16]]],
}
DATA_10 = {
"image": np.arange(9).reshape((1, 1, 3, 3)),
PostFix.meta("image"): {
"spatial_shape": [3, 3, 1],
"foreground_start_coord": np.array([0, 0, 0]),
"foreground_end_coord": np.array([1, 3, 3]),
"foreground_original_shape": (1, 1, 3, 3),
"foreground_cropped_shape": (1, 1, 3, 3),
"original_affine": np.array(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
),
},
"pred": np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]),
}
DATA_11 = {
"image": np.arange(500).reshape((1, 5, 10, 10)),
PostFix.meta("image"): {
"spatial_shape": [20, 20, 10],
"foreground_start_coord": np.array([2, 2, 2]),
"foreground_end_coord": np.array([4, 4, 4]),
"foreground_original_shape": (1, 5, 10, 10),
"foreground_cropped_shape": (1, 2, 2, 2),
"original_affine": np.array(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
),
},
"pred": np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]),
}
DATA_12 = {"image": np.arange(27).reshape(3, 3, 3), PostFix.meta("image"): {}, "guidance": [[0, 0, 0], [0, 1, 1], 1]}
FIND_SLICE_TEST_CASE_1 = [{"label": "label", "sids": "sids"}, DATA_1, [0]]
FIND_SLICE_TEST_CASE_2 = [{"label": "label", "sids": "sids"}, DATA_2, [0, 1]]
CROP_TEST_CASE_1 = [
{
"keys": ["image", "label"],
"source_key": "label",
"select_fn": lambda x: x > 0,
"channel_indices": None,
"margin": 0,
"spatial_size": [1, 4, 4],
},
DATA_1,
np.array([[[[1, 2, 1], [2, 3, 2], [1, 2, 1]]]]),
]
CROP_TEST_CASE_2 = [
{
"keys": ["image", "label"],
"source_key": "label",
"select_fn": lambda x: x > 0,
"channel_indices": None,
"margin": 0,
"spatial_size": [2, 4, 4],
},
DATA_1,
np.array([1, 1, 4, 4]),
]
ADD_INITIAL_POINT_TEST_CASE_1 = [
{"label": "label", "guidance": "guidance", "sids": "sids"},
DATA_1,
"[[[1, 0, 2, 2]], [[-1, -1, -1, -1]]]",
]
ADD_GUIDANCE_TEST_CASE_1 = [
{"image": "image", "guidance": "guidance"},
DATA_2,
np.array(
[
[
[[1, 2, 3, 2, 1], [1, 1, 3, 2, 1], [0, 0, 0, 0, 0], [1, 1, 1, 2, 1], [0, 2, 2, 2, 1]],
[[1, 0, 2, 0, 1], [0, 1, 2, 1, 0], [2, 2, 3, 2, 2], [0, 1, 2, 1, 0], [1, 0, 2, 0, 1]],
],
[
[
[0.0, 0.26689214, 0.37996644, 0.26689214, 0.0],
[0.26689214, 0.65222847, 0.81548417, 0.65222847, 0.26689214],
[0.37996635, 0.81548399, 1.0, 0.81548399, 0.37996635],
[0.26689214, 0.65222847, 0.81548417, 0.65222847, 0.26689214],
[0.0, 0.26689214, 0.37996644, 0.26689214, 0.0],
],
[
[0.0, 0.26689214, 0.37996644, 0.26689214, 0.0],
[0.26689214, 0.65222847, 0.81548417, 0.65222847, 0.26689214],
[0.37996635, 0.81548399, 1.0, 0.81548399, 0.37996635],
[0.26689214, 0.65222847, 0.81548417, 0.65222847, 0.26689214],
[0.0, 0.26689214, 0.37996644, 0.26689214, 0.0],
],
],
[
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
],
]
),
]
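# ADD_GUIDANCE_TEST_CASE_1 expects AddGuidanceSignald to append two extra channels to
# the image: a positive-guidance signal that peaks at 1.0 on the clicked voxels and
# decays with distance, and an all-zero negative-guidance channel (DATA_2 contains no
# negative clicks).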
FIND_DISCREPANCY_TEST_CASE_1 = [
{"label": "label", "pred": "pred", "discrepancy": "discrepancy"},
DATA_3,
np.array(
[
[[[[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
]
),
]
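# The two discrepancy channels in FIND_DISCREPANCY_TEST_CASE_1 can be read off DATA_3:
# the first channel flags voxels that are foreground in the label but missed by the
# prediction, and the second flags voxels predicted as foreground that the label marks
# as background.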
ADD_RANDOM_GUIDANCE_TEST_CASE_1 = [
{"guidance": "guidance", "discrepancy": "discrepancy", "probability": "probability"},
DATA_4,
"[[[1, 0, 2, 2], [1, 0, 1, 3]], [[-1, -1, -1, -1], [-1, -1, -1, -1]]]",
]
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_1 = [
{"ref_image": "image", "spatial_dims": 3, "guidance": "guidance", "depth_first": True},
DATA_5,
[[0, 2, 2]],
[],
]
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_2 = [
{"ref_image": "image", "spatial_dims": 3, "guidance": "guidance", "depth_first": True},
DATA_6,
[[0, 2, 2]],
[[0, 1, 0]],
]
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_3 = [
{"ref_image": "image", "spatial_dims": 3, "guidance": "guidance", "depth_first": True},
DATA_7,
[[3, 5, 7], [4, 5, 7]],
[[4, 5, 8]],
]
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_4 = [
{"ref_image": "image", "spatial_dims": 2, "guidance": "guidance", "depth_first": True},
DATA_6,
[[2, 2]],
[[1, 0]],
]
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_5 = [
{"ref_image": "image", "spatial_dims": 2, "guidance": "guidance", "depth_first": True, "slice_key": "slice"},
DATA_7,
[[5, 7]],
[],
]
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_6 = [
{"ref_image": "image", "spatial_dims": 2, "guidance": "guidance", "depth_first": True},
DATA_5,
[[2, 2]],
[],
]
SPATIAL_CROP_GUIDANCE_TEST_CASE_1 = [
{"keys": ["image"], "guidance": "guidance", "spatial_size": [1, 4, 4], "margin": 0},
DATA_8,
np.array([[[[357, 358]], [[457, 458]]]]),
]
SPATIAL_CROP_GUIDANCE_TEST_CASE_2 = [
{"keys": ["image"], "guidance": "guidance", "spatial_size": [2, 2], "margin": 1},
DATA_8,
np.array(
[
[
[[246, 247, 248, 249], [256, 257, 258, 259], [266, 267, 268, 269]],
[[346, 347, 348, 349], [356, 357, 358, 359], [366, 367, 368, 369]],
[[446, 447, 448, 449], [456, 457, 458, 459], [466, 467, 468, 469]],
]
]
),
]
SPATIAL_CROP_GUIDANCE_TEST_CASE_3 = [
{"keys": ["image"], "guidance": "guidance", "spatial_size": [3, 3], "margin": 0},
DATA_8,
np.array(
[
[
[[47, 48, 49], [57, 58, 59], [67, 68, 69]],
[[147, 148, 149], [157, 158, 159], [167, 168, 169]],
[[247, 248, 249], [257, 258, 259], [267, 268, 269]],
[[347, 348, 349], [357, 358, 359], [367, 368, 369]],
[[447, 448, 449], [457, 458, 459], [467, 468, 469]],
]
]
),
]
RESIZE_GUIDANCE_TEST_CASE_1 = [
{"ref_image": "image", "guidance": "guidance"},
DATA_9,
[[[3, 5, 7], [4, 5, 7]], [[4, 5, 8]]],
]
RESTORE_LABEL_TEST_CASE_1 = [
{"keys": ["pred"], "ref_image": "image", "mode": "nearest"},
DATA_10,
np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]),
]
RESULT = np.zeros((10, 20, 20))
RESULT[4:8, 4:8, 4:8] = np.array(
[
[[1.0, 1.0, 2.0, 2.0], [1.0, 1.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0], [3.0, 3.0, 4.0, 4.0]],
[[1.0, 1.0, 2.0, 2.0], [1.0, 1.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0], [3.0, 3.0, 4.0, 4.0]],
[[5.0, 5.0, 6.0, 6.0], [5.0, 5.0, 6.0, 6.0], [7.0, 7.0, 8.0, 8.0], [7.0, 7.0, 8.0, 8.0]],
[[5.0, 5.0, 6.0, 6.0], [5.0, 5.0, 6.0, 6.0], [7.0, 7.0, 8.0, 8.0], [7.0, 7.0, 8.0, 8.0]],
]
)
RESTORE_LABEL_TEST_CASE_2 = [{"keys": ["pred"], "ref_image": "image", "mode": "nearest"}, DATA_11, RESULT]
FETCH_2D_SLICE_TEST_CASE_1 = [
{"keys": ["image"], "guidance": "guidance"},
DATA_12,
np.array([[9, 10, 11], [12, 13, 14], [15, 16, 17]]),
]
class TestFindAllValidSlicesd(unittest.TestCase):
@parameterized.expand([FIND_SLICE_TEST_CASE_1, FIND_SLICE_TEST_CASE_2])
def test_correct_results(self, arguments, input_data, expected_result):
result = FindAllValidSlicesd(**arguments)(input_data)
np.testing.assert_allclose(result[arguments["sids"]], expected_result)
class TestSpatialCropForegroundd(unittest.TestCase):
@parameterized.expand([CROP_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
result = SpatialCropForegroundd(**arguments)(input_data)
np.testing.assert_allclose(result["image"], expected_result)
@parameterized.expand([CROP_TEST_CASE_2])
def test_correct_shape(self, arguments, input_data, expected_shape):
result = SpatialCropForegroundd(**arguments)(input_data)
np.testing.assert_equal(result["image"].shape, expected_shape)
@parameterized.expand([CROP_TEST_CASE_1])
def test_foreground_position(self, arguments, input_data, _):
result = SpatialCropForegroundd(**arguments)(input_data)
np.testing.assert_allclose(result[PostFix.meta("image")]["foreground_start_coord"], np.array([0, 1, 1]))
np.testing.assert_allclose(result[PostFix.meta("image")]["foreground_end_coord"], np.array([1, 4, 4]))
arguments["start_coord_key"] = "test_start_coord"
arguments["end_coord_key"] = "test_end_coord"
result = SpatialCropForegroundd(**arguments)(input_data)
np.testing.assert_allclose(result[PostFix.meta("image")]["test_start_coord"], np.array([0, 1, 1]))
np.testing.assert_allclose(result[PostFix.meta("image")]["test_end_coord"], np.array([1, 4, 4]))
class TestAddInitialSeedPointd(unittest.TestCase):
@parameterized.expand([ADD_INITIAL_POINT_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
seed = 0
add_fn = AddInitialSeedPointd(**arguments)
add_fn.set_random_state(seed)
result = add_fn(input_data)
self.assertEqual(result[arguments["guidance"]], expected_result)
class TestAddGuidanceSignald(unittest.TestCase):
@parameterized.expand([ADD_GUIDANCE_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
result = AddGuidanceSignald(**arguments)(input_data)
np.testing.assert_allclose(result["image"], expected_result, rtol=1e-5)
class TestFindDiscrepancyRegionsd(unittest.TestCase):
@parameterized.expand([FIND_DISCREPANCY_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
result = FindDiscrepancyRegionsd(**arguments)(input_data)
np.testing.assert_allclose(result[arguments["discrepancy"]], expected_result)
class TestAddRandomGuidanced(unittest.TestCase):
@parameterized.expand([ADD_RANDOM_GUIDANCE_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
seed = 0
add_fn = AddRandomGuidanced(**arguments)
add_fn.set_random_state(seed)
result = add_fn(input_data)
self.assertEqual(result[arguments["guidance"]], expected_result)
class TestAddGuidanceFromPointsd(unittest.TestCase):
@parameterized.expand(
[
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_1,
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_2,
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_3,
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_4,
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_5,
ADD_GUIDANCE_FROM_POINTS_TEST_CASE_6,
]
)
def test_correct_results(self, arguments, input_data, expected_pos, expected_neg):
result = AddGuidanceFromPointsd(**arguments)(input_data)
self.assertEqual(result[arguments["guidance"]][0], expected_pos)
self.assertEqual(result[arguments["guidance"]][1], expected_neg)
class TestSpatialCropGuidanced(unittest.TestCase):
@parameterized.expand(
[SPATIAL_CROP_GUIDANCE_TEST_CASE_1, SPATIAL_CROP_GUIDANCE_TEST_CASE_2, SPATIAL_CROP_GUIDANCE_TEST_CASE_3]
)
def test_correct_results(self, arguments, input_data, expected_result):
result = SpatialCropGuidanced(**arguments)(input_data)
np.testing.assert_allclose(result["image"], expected_result)
class TestResizeGuidanced(unittest.TestCase):
@parameterized.expand([RESIZE_GUIDANCE_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
result = ResizeGuidanced(**arguments)(input_data)
self.assertEqual(result[arguments["guidance"]], expected_result)
class TestRestoreLabeld(unittest.TestCase):
@parameterized.expand([RESTORE_LABEL_TEST_CASE_1, RESTORE_LABEL_TEST_CASE_2])
def test_correct_results(self, arguments, input_data, expected_result):
result = RestoreLabeld(**arguments)(input_data)
np.testing.assert_allclose(result["pred"], expected_result)
class TestFetch2DSliced(unittest.TestCase):
@parameterized.expand([FETCH_2D_SLICE_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
result = Fetch2DSliced(**arguments)(input_data)
np.testing.assert_allclose(result["image"], expected_result)
if __name__ == "__main__":
unittest.main()
| repo: CoinAlpha/hummingbot | branch: refs/heads/development | path: /hummingbot/user/user_balances.py |
| license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 7,646 bytes |
import logging
from decimal import Decimal
from functools import lru_cache
from typing import Dict, List, Optional, Set
from hummingbot.client.config.client_config_map import ClientConfigMap
from hummingbot.client.config.config_helpers import ReadOnlyClientConfigAdapter, get_connector_class
from hummingbot.client.config.security import Security
from hummingbot.client.settings import AllConnectorSettings, GatewayConnectionSetting, gateway_connector_trading_pairs
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.core.utils.gateway_config_utils import flatten
from hummingbot.core.utils.market_price import get_last_price
class UserBalances:
__instance = None
@staticmethod
def connect_market(exchange, client_config_map: ClientConfigMap, **api_details):
connector = None
conn_setting = AllConnectorSettings.get_connector_settings()[exchange]
if api_details or conn_setting.uses_gateway_generic_connector():
connector_class = get_connector_class(exchange)
read_only_client_config = ReadOnlyClientConfigAdapter.lock_config(client_config_map)
init_params = conn_setting.conn_init_parameters(
trading_pairs=gateway_connector_trading_pairs(conn_setting.name),
api_keys=api_details,
client_config_map=read_only_client_config,
)
# collect trading pairs from the gateway connector settings
trading_pairs: List[str] = gateway_connector_trading_pairs(conn_setting.name)
# collect unique trading pairs that are for balance reporting only
if conn_setting.uses_gateway_generic_connector():
config: Optional[Dict[str, str]] = GatewayConnectionSetting.get_connector_spec_from_market_name(conn_setting.name)
if config is not None:
existing_pairs = set(flatten([x.split("-") for x in trading_pairs]))
other_tokens: Set[str] = set(config.get("tokens", "").split(","))
other_tokens.discard("")
tokens: List[str] = [t for t in other_tokens if t not in existing_pairs]
                    if tokens:
trading_pairs.append("-".join(tokens))
connector = connector_class(**init_params)
return connector
# return error message if the _update_balances fails
@staticmethod
async def _update_balances(market) -> Optional[str]:
try:
await market._update_balances()
except Exception as e:
logging.getLogger().debug(f"Failed to update balances for {market}", exc_info=True)
return str(e)
return None
@staticmethod
def instance():
if UserBalances.__instance is None:
UserBalances()
return UserBalances.__instance
@staticmethod
@lru_cache(maxsize=10)
def is_gateway_market(exchange_name: str) -> bool:
return (
exchange_name in sorted(
AllConnectorSettings.get_gateway_amm_connector_names().union(
AllConnectorSettings.get_gateway_evm_amm_lp_connector_names()
).union(
AllConnectorSettings.get_gateway_clob_connector_names()
)
)
)
def __init__(self):
if UserBalances.__instance is not None:
raise Exception("This class is a singleton!")
else:
UserBalances.__instance = self
self._markets = {}
async def add_exchange(self, exchange, client_config_map: ClientConfigMap, **api_details) -> Optional[str]:
self._markets.pop(exchange, None)
market = UserBalances.connect_market(exchange, client_config_map, **api_details)
if not market:
return "API keys have not been added."
err_msg = await UserBalances._update_balances(market)
if err_msg is None:
self._markets[exchange] = market
return err_msg
def all_balances(self, exchange) -> Dict[str, Decimal]:
if exchange not in self._markets:
return {}
return self._markets[exchange].get_all_balances()
async def update_exchange_balance(self, exchange_name: str, client_config_map: ClientConfigMap) -> Optional[str]:
is_gateway_market = self.is_gateway_market(exchange_name)
if is_gateway_market and exchange_name in self._markets:
# we want to refresh gateway connectors always, since the applicable tokens change over time.
# doing this will reinitialize and fetch balances for active trading pair
del self._markets[exchange_name]
if exchange_name in self._markets:
return await self._update_balances(self._markets[exchange_name])
else:
await Security.wait_til_decryption_done()
api_keys = Security.api_keys(exchange_name) if not is_gateway_market else {}
return await self.add_exchange(exchange_name, client_config_map, **api_keys)
# returns error message for each exchange
async def update_exchanges(
self,
client_config_map: ClientConfigMap,
reconnect: bool = False,
exchanges: Optional[List[str]] = None
) -> Dict[str, Optional[str]]:
exchanges = exchanges or []
tasks = []
# Update user balances
if len(exchanges) == 0:
exchanges = [cs.name for cs in AllConnectorSettings.get_connector_settings().values()]
exchanges: List[str] = [
cs.name
for cs in AllConnectorSettings.get_connector_settings().values()
if not cs.use_ethereum_wallet
and cs.name in exchanges
and not cs.name.endswith("paper_trade")
]
if reconnect:
self._markets.clear()
for exchange in exchanges:
tasks.append(self.update_exchange_balance(exchange, client_config_map))
results = await safe_gather(*tasks)
return {ex: err_msg for ex, err_msg in zip(exchanges, results)}
async def all_balances_all_exchanges(self, client_config_map: ClientConfigMap) -> Dict[str, Dict[str, Decimal]]:
await self.update_exchanges(client_config_map)
return {k: v.get_all_balances() for k, v in sorted(self._markets.items(), key=lambda x: x[0])}
def all_available_balances_all_exchanges(self) -> Dict[str, Dict[str, Decimal]]:
return {k: v.available_balances for k, v in sorted(self._markets.items(), key=lambda x: x[0])}
async def balances(self, exchange, client_config_map: ClientConfigMap, *symbols) -> Dict[str, Decimal]:
if await self.update_exchange_balance(exchange, client_config_map) is None:
results = {}
for token, bal in self.all_balances(exchange).items():
matches = [s for s in symbols if s.lower() == token.lower()]
if matches:
results[matches[0]] = bal
return results
@staticmethod
def validate_ethereum_wallet() -> Optional[str]:
return "Connector deprecated."
@staticmethod
async def base_amount_ratio(exchange, trading_pair, balances) -> Optional[Decimal]:
try:
base, quote = trading_pair.split("-")
base_amount = balances.get(base, 0)
quote_amount = balances.get(quote, 0)
price = await get_last_price(exchange, trading_pair)
total_value = base_amount + (quote_amount / price)
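            # Illustrative numbers: with 1 ETH (base) and 2000 USDT (quote) at a last
            # price of 2000 USDT per ETH, the quote balance is worth 1 ETH, so
            # total_value == 2 and the returned ratio is 0.5.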
return None if total_value <= 0 else base_amount / total_value
except Exception:
return None
| repo: rigetti/pyquil | branch: refs/heads/master | path: /test/unit/test_paulis.py |
| license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 24,295 bytes |
#!/usr/bin/python
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import math
import warnings
from functools import reduce
from itertools import product
from operator import mul
import numpy as np
import pytest
from pyquil.gates import RX, RZ, CNOT, H, X, PHASE
from pyquil.paulis import (
PauliTerm,
PauliSum,
exponential_map,
exponentiate_commuting_pauli_sum,
ID,
exponentiate,
trotterize,
is_zero,
check_commutation,
commuting_sets,
term_with_coeff,
sI,
sX,
sY,
sZ,
ZERO,
is_identity,
)
from pyquil.quil import Program
from pyquil.simulation.tools import program_unitary
def isclose(a, b, rel_tol=1e-10, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def test_simplify_terms():
term = PauliTerm("Z", 0) * -1.0 * PauliTerm("Z", 0)
assert term.id() == ""
assert term.coefficient == -1.0
term = PauliTerm("Z", 0) + PauliTerm("Z", 0, 1.0)
assert str(term) == "(2+0j)*Z0"
def test_get_qubits():
term = PauliTerm("Z", 0) * PauliTerm("X", 1)
assert term.get_qubits() == [0, 1]
sum_term = PauliTerm("X", 0, 0.5) + 0.5j * PauliTerm("Y", 10) * PauliTerm("Y", 0, 0.5j)
assert sum_term.get_qubits() == [0, 10]
def test_simplify_term_id_1():
term = PauliTerm("I", 0, 0.5)
assert term.id() == ""
assert term.coefficient == 0.5
def test_simplify_term_id_2():
term = 0.5 * ID()
assert term.id() == ""
assert term.coefficient == 0.5
def test_simplify_term_id_3():
s = 0.25 + 0.25 * ID()
terms = s.terms
assert len(terms) == 1
assert terms[0].id() == ""
assert terms[0].coefficient == 0.5
def test_simplify_term_single():
term = PauliTerm("Z", 0) * PauliTerm("I", 1) * PauliTerm("X", 2, 0.5j) * PauliTerm("Z", 0, 1.0)
assert term.id() == "X2"
assert term.coefficient == 0.5j
def test_simplify_term_xz():
term1 = (-0.5 * PauliTerm("X", 0)) * (-1.0 * PauliTerm("Z", 0))
term2 = -0.5 * PauliTerm("X", 0) * (-1.0) * PauliTerm("Z", 0)
term3 = 0.5 * PauliTerm("X", 0) * PauliTerm("Z", 0)
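    # All three forms reduce to 0.5 * X0 * Z0, and since X * Z = -i * Y the simplified
    # term is (-0.5j) * Y0, which is exactly what the loop below checks.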
for term in [term1, term2, term3]:
assert term.id() == "Y0"
assert term.coefficient == -0.5j
def test_simplify_term_multindex():
term = PauliTerm("X", 0, coefficient=-0.5) * PauliTerm("Z", 0, coefficient=-1.0) * PauliTerm("X", 2, 0.5)
assert term.id(sort_ops=False) == "Y0X2"
assert term.coefficient == -0.25j
def test_simplify_sum_terms():
sum_term = PauliSum([PauliTerm("X", 0, 0.5), PauliTerm("Z", 0, 0.5j)])
str_sum_term = str(sum_term + sum_term)
assert str_sum_term == "(1+0j)*X0 + 1j*Z0" or str_sum_term == "1j*Z0 + (1+0j)*X0"
sum_term = PauliSum([PauliTerm("X", 0, 0.5), PauliTerm("X", 0, 0.5)])
assert str(sum_term.simplify()) == "(1+0j)*X0"
# test the simplify on multiplication
sum_term = PauliSum([PauliTerm("X", 0, 0.5), PauliTerm("X", 0, 0.5)])
assert str(sum_term * sum_term) == "(1+0j)*I"
def test_copy():
term = PauliTerm("X", 0, 0.5) * PauliTerm("X", 1, 0.5)
new_term = term.copy()
term = term * PauliTerm("X", 2, 0.5)
new_term = new_term * PauliTerm("X", 2, 0.5)
assert term == new_term # value equality
assert term is not new_term # ref inequality
assert term._ops is not new_term._ops
term = PauliTerm("X", 0, 0.5) * PauliTerm("X", 1, 0.5)
new_term = term * PauliTerm("X", 2, 0.5)
assert term != new_term
assert term is not new_term
assert term._ops is not new_term._ops
def test_len():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
assert len(term) == 2
def test_sum_len():
pauli_sum = PauliTerm("Z", 0, 1.0) + PauliTerm("Z", 1, 1.0)
assert len(pauli_sum) == 2
def test_enumerate():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
position_op_pairs = [(0, "Z"), (1, "Z"), (5, "X")]
for key, val in term:
assert (key, val) in position_op_pairs
def test_getitem():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
assert term[0] == "Z"
assert term[1] == "Z"
assert term[2] == "I"
assert term[3] == "I"
assert term[4] == "I"
assert term[5] == "X"
assert len(term) == 3
def test_ids():
term_1 = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
term_2 = PauliTerm("X", 5, 5) * PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
with pytest.warns(FutureWarning) as w:
assert term_1.id() == term_2.id()
assert "should be avoided" in str(w[0])
def test_ids_no_sort():
term_1 = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
term_2 = PauliTerm("X", 5, 5) * PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
assert term_1.id(sort_ops=False) == "Z0Z1X5"
assert term_2.id(sort_ops=False) == "X5Z0Z1"
def test_operations_as_set():
term_1 = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
term_2 = PauliTerm("X", 5, 5) * PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
assert term_1.operations_as_set() == term_2.operations_as_set()
def test_pauliop_inputs():
with pytest.raises(ValueError):
PauliTerm("X", -2)
def test_pauli_sum():
q_plus = 0.5 * PauliTerm("X", 0) + 0.5j * PauliTerm("Y", 0)
the_sum = q_plus * PauliSum([PauliTerm("X", 0)])
term_strings = [str(x) for x in the_sum.terms]
assert "(0.5+0j)*I" in term_strings
assert "(0.5+0j)*Z0" in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
the_sum = q_plus * PauliTerm("X", 0)
term_strings = [str(x) for x in the_sum.terms]
assert "(0.5+0j)*I" in term_strings
assert "(0.5+0j)*Z0" in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
the_sum = PauliTerm("X", 0) * q_plus
term_strings = [str(x) for x in the_sum.terms]
assert "(0.5+0j)*I" in term_strings
assert "(-0.5+0j)*Z0" in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
with pytest.raises(ValueError):
_ = PauliSum(sI(0))
with pytest.raises(ValueError):
_ = PauliSum([1, 1, 1, 1])
with pytest.raises(ValueError):
_ = the_sum * []
def test_ps_adds_pt_1():
term = ID()
b = term + term
assert str(b) == "(2+0j)*I"
assert str(b + term) == "(3+0j)*I"
assert str(term + b) == "(3+0j)*I"
def test_ps_adds_pt_2():
term = ID()
b = term + 1.0
assert str(b) == "(2+0j)*I"
assert str(b + 1.0) == "(3+0j)*I"
assert str(1.0 + b) == "(3+0j)*I"
b = sX(0) + 1.0
assert str(b) == "(1+0j)*X0 + (1+0j)*I"
b = 1.0 + sX(0)
assert str(b) == "(1+0j)*I + (1+0j)*X0"
def test_pauliterm_sub():
assert str(sX(1) - 2.0) == str(sX(1) + -2.0)
assert str(1.4 - sZ(1)) == str(1.4 + -1.0 * sZ(1))
def test_ps_sub():
term = 3 * ID()
b = term - 1.0
assert str(b) == "(2+0j)*I"
assert str(b - 1.0) == "(1+0j)*I"
assert str(1.0 - b) == "(-1+0j)*I"
b = 1.0 - sX(0)
assert str(b) == "(1+0j)*I + (-1+0j)*X0"
b = sX(0) - 1.0
assert str(b) == "(1+0j)*X0 + (-1+0j)*I"
def test_zero_terms():
term = PauliTerm("X", 0, 1.0) + PauliTerm("X", 0, -1.0) + PauliTerm("Y", 0, 0.5)
assert str(term) == "(0.5+0j)*Y0"
term = PauliTerm("X", 0, 1.0) + PauliTerm("X", 0, -1.0)
assert str(term) == "0j*I"
assert len(term.terms) == 1
term2 = term * PauliTerm("Z", 2, 0.5)
assert str(term2) == "0j*I"
term3 = PauliTerm("Z", 2, 0.5) + term
assert str(term3) == "(0.5+0j)*Z2"
term4 = PauliSum([])
assert str(term4) == "0j*I"
term = PauliSum([PauliTerm("X", 0, 0.0), PauliTerm("Y", 1, 1.0) * PauliTerm("Z", 2)])
assert str(term) == "0j*X0 + (1+0j)*Y1*Z2"
term = term.simplify()
assert str(term) == "(1+0j)*Y1*Z2"
def test_exponentiate_1():
# test rotation of single qubit
generator = PauliTerm("Z", 0, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
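    # exponential_map(generator)(t) builds exp(-i * t * generator); with a unit
    # coefficient, t = 1, and RZ(phi) = exp(-i * phi * Z / 2), this is RZ(2.0).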
result_prog = Program().inst(RZ(2.0, 0))
assert prog == result_prog
def test_exponentiate_2():
# testing general 2-circuit
generator = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
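    # Standard circuit for exp(-i * t * Z0*Z1): the first CNOT accumulates the parity
    # of the two qubits on qubit 1, RZ(2*t) applies the phase, and the second CNOT
    # uncomputes the parity.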
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(CNOT(0, 1)).inst(RZ(2.0, 1)).inst(CNOT(0, 1))
assert prog == result_prog
def test_exponentiate_bp0_ZX():
# testing change of basis position 0
generator = PauliTerm("X", 0, 1.0) * PauliTerm("Z", 1, 1.0)
param_prog = exponential_map(generator)
prog = param_prog(1)
result_prog = Program().inst([H(0), CNOT(0, 1), RZ(2.0, 1), CNOT(0, 1), H(0)])
assert prog == result_prog
def test_exponentiate_bp1_XZ():
# testing change of basis position 1
generator = PauliTerm("Z", 0, 1.0) * PauliTerm("X", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([H(1), CNOT(0, 1), RZ(2.0, 1), CNOT(0, 1), H(1)])
assert prog == result_prog
def test_exponentiate_bp0_ZY():
# testing change of basis position 0
generator = PauliTerm("Y", 0, 1.0) * PauliTerm("Z", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([RX(math.pi / 2.0, 0), CNOT(0, 1), RZ(2.0, qubit=1), CNOT(0, 1), RX(-math.pi / 2, 0)])
assert prog == result_prog
def test_exponentiate_bp1_YZ():
# testing change of basis position 1
generator = PauliTerm("Z", 0, 1.0) * PauliTerm("Y", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([RX(math.pi / 2.0, 1), CNOT(0, 1), RZ(2.0, 1), CNOT(0, 1), RX(-math.pi / 2.0, 1)])
assert prog == result_prog
def test_exponentiate_3cob():
# testing circuit for 3-terms with change of basis
generator = PauliTerm("Z", 0, 1.0) * PauliTerm("Y", 1, 1.0) * PauliTerm("X", 2, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(
[
RX(math.pi / 2.0, 1),
H(2),
CNOT(0, 1),
CNOT(1, 2),
RZ(2.0, 2),
CNOT(1, 2),
CNOT(0, 1),
RX(-math.pi / 2.0, 1),
H(2),
]
)
assert prog == result_prog
def test_exponentiate_3ns():
# testing circuit for 3-terms non-sequential
generator = PauliTerm("Y", 0, 1.0) * PauliTerm("I", 1, 1.0) * PauliTerm("Y", 2, 1.0) * PauliTerm("Y", 3, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(
[
RX(math.pi / 2.0, 0),
RX(math.pi / 2.0, 2),
RX(math.pi / 2.0, 3),
CNOT(0, 2),
CNOT(2, 3),
RZ(2.0, 3),
CNOT(2, 3),
CNOT(0, 2),
RX(-math.pi / 2.0, 0),
RX(-math.pi / 2.0, 2),
RX(-math.pi / 2.0, 3),
]
)
assert prog == result_prog
def test_exponentiate_commuting_pauli_sum():
pauli_sum = PauliSum([PauliTerm("Z", 0, 0.5), PauliTerm("Z", 1, 0.5)])
prog = Program().inst(RZ(1.0, 0)).inst(RZ(1.0, 1))
result_prog = exponentiate_commuting_pauli_sum(pauli_sum)(1.0)
assert prog == result_prog
def test_exponentiate_prog():
ham = PauliTerm("Z", 0)
result_prog = Program(RZ(2.0, 0))
prog = exponentiate(ham)
assert prog == result_prog
def test_exponentiate_identity():
generator = PauliTerm("I", 1, 0.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program()
assert prog == result_prog
generator = PauliTerm("I", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([X(0), PHASE(-1.0, 0), X(0), PHASE(-1.0, 0)])
assert prog == result_prog
generator = PauliTerm("I", 10, 0.08)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([X(0), PHASE(-0.08, 0), X(0), PHASE(-0.08, 0)])
assert prog == result_prog
def test_trotterize():
term_one = PauliTerm("X", 0, 1.0)
term_two = PauliTerm("Z", 0, 1.0)
with pytest.raises(ValueError):
trotterize(term_one, term_two, trotter_order=0)
with pytest.raises(ValueError):
trotterize(term_one, term_two, trotter_order=5)
prog = trotterize(term_one, term_one)
result_prog = Program().inst([H(0), RZ(2.0, 0), H(0), H(0), RZ(2.0, 0), H(0)])
assert prog == result_prog
# trotter_order 1 steps 1
prog = trotterize(term_one, term_two, trotter_steps=1)
result_prog = Program().inst([H(0), RZ(2.0, 0), H(0), RZ(2.0, 0)])
assert prog == result_prog
# trotter_order 1 steps 2
prog = trotterize(term_one, term_two, trotter_steps=2)
result_prog = Program().inst([H(0), RZ(1.0, 0), H(0), RZ(1.0, 0), H(0), RZ(1.0, 0), H(0), RZ(1.0, 0)])
assert prog == result_prog
# trotter_order 2 steps 1
prog = trotterize(term_one, term_two, trotter_order=2)
result_prog = Program().inst([H(0), RZ(1.0, 0), H(0), RZ(2.0, 0), H(0), RZ(1.0, 0), H(0)])
assert prog == result_prog
# trotter_order 2 steps 2
prog = trotterize(term_one, term_two, trotter_order=2, trotter_steps=2)
result_prog = Program().inst(
[
H(0),
RZ(0.5, 0),
H(0),
RZ(1.0, 0),
H(0),
RZ(0.5, 0),
H(0),
H(0),
RZ(0.5, 0),
H(0),
RZ(1.0, 0),
H(0),
RZ(0.5, 0),
H(0),
]
)
assert prog == result_prog
# trotter_order 3 steps 1
prog = trotterize(term_one, term_two, trotter_order=3, trotter_steps=1)
result_prog = Program().inst(
[
H(0),
RZ(14.0 / 24, 0),
H(0),
RZ(4.0 / 3.0, 0),
H(0),
RZ(1.5, 0),
H(0),
RZ(-4.0 / 3.0, 0),
H(0),
RZ(-2.0 / 24, 0),
H(0),
RZ(2.0, 0),
]
)
assert prog == result_prog
def test_trotterize_order():
def expmi(hermitian_matrix):
"""Compute the matrix exponential of -1j * hermitian_matrix."""
L, Q = np.linalg.eigh(hermitian_matrix)
return Q @ np.diag(np.exp(-1j * L)) @ Q.conj().T
def error(order, time_step_length):
a_pauli = time_step_length * sZ(0) * sY(1) * sX(2)
a_program = a_pauli.program
b_pauli = time_step_length * sX(0) * sZ(1) * sY(2)
b_program = b_pauli.program
num_qubits = len(a_program.get_qubits())
assert num_qubits == len(b_program.get_qubits())
a = program_unitary(a_program, num_qubits)
b = program_unitary(b_program, num_qubits)
a_plus_b = a + b
exp_a_plus_b = expmi(time_step_length * a_plus_b)
trotter_program = trotterize(a_pauli, b_pauli, trotter_order=order)
trotter = program_unitary(trotter_program, num_qubits)
return np.linalg.norm(exp_a_plus_b - trotter, np.inf)
xs = 10 ** np.logspace(-1, -6, 10)
for order in [1, 2, 3, 4]:
ys = [error(order, float(x)) for x in xs]
p = np.polyfit(np.log10(xs), np.log10(ys), 1)
assert p[0] >= order, f"Bound not satisfied with order={order}: the slope is {p[0]}"
def test_is_zero():
with pytest.raises(TypeError):
is_zero(1)
p_term = PauliTerm("X", 0)
ps_term = p_term + PauliTerm("Z", 1)
id_term = PauliTerm("I", 0)
assert not is_zero(p_term)
assert is_zero(p_term + -1 * p_term)
assert not is_zero(ps_term)
assert not is_zero(id_term)
def test_check_commutation():
term1 = PauliTerm("X", 0) * PauliTerm("X", 1)
term2 = PauliTerm("Y", 0) * PauliTerm("Y", 1)
term3 = PauliTerm("Y", 0) * PauliTerm("Z", 2)
# assert check_commutation(PauliSum([term1]), term2)
assert check_commutation([term2], term3)
assert check_commutation([term2], term3)
assert not check_commutation([term1], term3)
def _commutator(t1, t2):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=r"The term .+ will be combined with .+, " r"but they have different orders of operations.*",
category=UserWarning,
)
return t1 * t2 + -1 * t2 * t1
def test_check_commutation_rigorous():
# more rigorous test. Get all operators in Pauli group
p_n_group = ("I", "X", "Y", "Z")
pauli_list = list(product(p_n_group, repeat=3))
pauli_ops = [list(zip(x, range(3))) for x in pauli_list]
pauli_ops_pq = [reduce(mul, (PauliTerm(*x) for x in op)) for op in pauli_ops]
non_commuting_pairs = []
commuting_pairs = []
for x in range(len(pauli_ops_pq)):
for y in range(x, len(pauli_ops_pq)):
tmp_op = _commutator(pauli_ops_pq[x], pauli_ops_pq[y])
assert len(tmp_op.terms) == 1
if is_zero(tmp_op.terms[0]):
commuting_pairs.append((pauli_ops_pq[x], pauli_ops_pq[y]))
else:
non_commuting_pairs.append((pauli_ops_pq[x], pauli_ops_pq[y]))
# now that we have our sets let's check against our code.
for t1, t2 in non_commuting_pairs:
assert not check_commutation([t1], t2)
for t1, t2 in commuting_pairs:
assert check_commutation([t1], t2)
def test_commuting_sets():
term1 = PauliTerm("X", 0) * PauliTerm("X", 1)
term2 = PauliTerm("Y", 0) * PauliTerm("Y", 1)
term3 = PauliTerm("Y", 0) * PauliTerm("Z", 2)
pauli_sum = term1 + term2 + term3
commuting_sets(pauli_sum)
def test_paulisum_iteration():
term_list = [sX(2), sZ(4)]
pauli_sum = sum(term_list)
for ii, term in enumerate(pauli_sum):
assert term_list[ii] == term
def test_paulisum_indexing():
pauli_sum = 0.5 * sX(0) + 0.1 * sZ(1)
assert pauli_sum[0] == 0.5 * sX(0)
for ii, term in enumerate(pauli_sum.terms):
assert pauli_sum[ii] == term
def test_term_powers():
for qubit_id in range(2):
pauli_terms = [sI(qubit_id), sX(qubit_id), sY(qubit_id), sZ(qubit_id)]
for pauli_term in pauli_terms:
assert pauli_term ** 0 == sI(qubit_id)
assert pauli_term ** 1 == pauli_term
assert pauli_term ** 2 == sI(qubit_id)
assert pauli_term ** 3 == pauli_term
with pytest.raises(ValueError):
pauli_terms[0] ** -1
# Test to make sure large powers can be computed
(PauliTerm("X", 0, 2) * PauliTerm("Y", 0, 2)) ** 400
def test_sum_power():
pauli_sum = (sY(0) - sX(0)) * (1.0 / np.sqrt(2))
assert pauli_sum ** 2 == PauliSum([sI(0)])
with pytest.raises(ValueError):
_ = pauli_sum ** -1
pauli_sum = sI(0) + sI(1)
assert pauli_sum ** 0 == sI(0)
# Test to make sure large powers can be computed
pauli_sum ** 400
def test_term_equality():
with pytest.raises(TypeError):
sI(0) != 0
assert sI(0) == sI(0)
assert PauliTerm("X", 10, 1 + 1.0j) == PauliTerm("X", 10, 1 + 1.0j)
assert PauliTerm("X", 10, 1 + 1.0j) + PauliTerm("X", 10, 1 + 1.0j) != PauliTerm("X", 10, 1 + 1.0j)
assert PauliTerm("X", 10, 1 + 1.0j) != PauliTerm("X", 10, 1 + 1.0j) + PauliTerm("X", 10, 1 + 1.0j)
def test_term_with_coeff():
assert PauliTerm("X", 0, 1.0j) == term_with_coeff(sX(0), 1.0j)
assert PauliTerm("X", 0, -1.0) == term_with_coeff(sX(0), -1)
with pytest.raises(ValueError):
term_with_coeff(sI(0), None)
def test_sum_equality():
pauli_sum = sY(0) - sX(0)
assert pauli_sum != 2 * pauli_sum
assert pauli_sum != pauli_sum + sZ(0)
assert pauli_sum + sZ(0) != pauli_sum
assert pauli_sum != sY(1) - sX(1)
assert pauli_sum == -1.0 * sX(0) + sY(0)
assert pauli_sum == pauli_sum * 1.0
with pytest.raises(TypeError):
assert pauli_sum != 0
def test_zero_term():
qubit_id = 0
coefficient = 10
ps = sI(qubit_id) + sX(qubit_id)
assert coefficient * ZERO() == ZERO()
assert ZERO() * coefficient == ZERO()
assert ZERO() * ID() == ZERO()
assert ZERO() + ID() == ID()
assert ZERO() + ps == ps
assert ps + ZERO() == ps
def test_from_list():
terms_list = [("X", 0), ("Y", 1), ("Z", 5)]
term = reduce(lambda x, y: x * y, [PauliTerm(*x) for x in terms_list])
pterm = PauliTerm.from_list(terms_list)
assert pterm == term
with pytest.raises(ValueError):
# terms are not on disjoint qubits
pterm = PauliTerm.from_list([("X", 0), ("Y", 0)])
def test_ordered():
term = sZ(3) * sZ(2) * sZ(1)
prog = exponential_map(term)(0.5)
assert prog.out() == "CNOT 3 2\nCNOT 2 1\nRZ(1.0) 1\nCNOT 2 1\nCNOT 3 2\n"
def test_numpy_integer_types():
(idx_np,) = np.arange(1, dtype=np.int64)
assert isinstance(idx_np, np.int64)
# on python 3 this fails unless explicitly allowing for numpy integer types
PauliTerm("X", idx_np)
def test_simplify():
t1 = sZ(0) * sZ(1)
t2 = sZ(0) * sZ(1)
assert (t1 + t2) == 2 * sZ(0) * sZ(1)
def test_dont_simplify():
t1 = sZ(0) * sZ(1)
t2 = sZ(2) * sZ(3)
assert (t1 + t2) != 2 * sZ(0) * sZ(1)
def test_simplify_warning():
t1 = sZ(0) * sZ(1)
t2 = sZ(1) * sZ(0)
tsum = t1 + t2
assert tsum == 2 * sZ(0) * sZ(1)
def test_pauli_string():
p = PauliTerm("X", 1) * PauliTerm("Z", 5)
assert p.pauli_string([1, 5]) == "XZ"
assert p.pauli_string([1]) == "X"
assert p.pauli_string([5]) == "Z"
assert p.pauli_string([5, 6]) == "ZI"
assert p.pauli_string([0, 1]) == "IX"
def test_str():
term = 2.0 * sX(1) * sX(2)
assert str(term) == "(2+0j)*X1*X2"
assert term.compact_str() == "(2+0j)*X1X2"
def test_from_str():
with pytest.raises(ValueError):
PauliTerm.from_compact_str("1*A0→1*Z0")
def test_is_identity():
pt1 = -1.5j * sI(2)
pt2 = 1.5 * sX(1) * sZ(2)
assert is_identity(pt1)
assert is_identity(pt2 + (-1 * pt2) + sI(0))
assert not is_identity(0 * pt1)
assert not is_identity(pt2 + (-1 * pt2))
def test_identity_no_qubit():
assert is_identity(sI())
def test_qubit_validation():
with pytest.raises(ValueError):
sX(None)
def test_pauli_term_from_str():
# tests that should _not_ fail are in test_pauli_sum_from_str
with pytest.raises(ValueError):
PauliTerm.from_compact_str("X0")
with pytest.raises(ValueError):
PauliTerm.from_compact_str("10")
with pytest.raises(ValueError):
PauliTerm.from_compact_str("1.0X0")
with pytest.raises(ValueError):
PauliTerm.from_compact_str("(1.0+9i)*X0")
with pytest.raises(ValueError):
PauliTerm.from_compact_str("(1.0+0j)*A0")
def test_pauli_sum_from_str():
# this also tests PauliTerm.from_compact_str() since it gets called
Sum = (1.5 + 0.5j) * sX(0) * sZ(2) + 0.7 * sZ(1)
another_str = "(1.5 + 0.5j)*X0*Z2+.7*Z1"
assert PauliSum.from_compact_str(str(Sum)) == Sum
assert PauliSum.from_compact_str(Sum.compact_str()) == Sum
assert PauliSum.from_compact_str(another_str) == Sum
# test sums of length one
Sum = PauliSum([1 * sY(0) * sY(1)])
the_str = "1*Y0*Y1"
assert PauliSum.from_compact_str(the_str) == Sum
# test sums containing the identity
Sum = (1.5 + 0.5j) * sX(0) * sZ(2) + 0.7 * sI(1)
the_str = "(1.5 + 0.5j)*X0*Z2+.7*I"
assert PauliSum.from_compact_str(the_str) == Sum
# test the simplification (both in sums and products)
Sum = PauliSum([2 * sY(1)])
the_str = "1*Y0*X0 + (0+1j)*Z0 + 2*Y1"
assert PauliSum.from_compact_str(the_str) == Sum
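# --- Illustrative usage sketch (added for clarity; not part of the original test module).
# Assumes pyquil is installed. exponential_map(term) returns a callable that maps a rotation
# parameter to the Program implementing exp(-i * parameter * term), as the tests above exercise.
def _example_exponential_map():
    from pyquil.paulis import sZ, exponential_map

    term = 0.5 * sZ(0)                  # PauliTerm with coefficient 0.5
    param_prog = exponential_map(term)  # parameter -> Program
    return param_prog(1.0)              # equivalent to RZ(1.0) on qubit 0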
|
9269e83f7e4123b28f593080ea55cd0d90fbd787
|
86cc299c9b049c01f890ef08451aef8e8ead04b3
|
/Archived/02_LogisticRegression/103_decision_boundary_in_linear_r.py
|
9cb0b84825d70471a17efdbe671f9b0c35f43255
|
[] |
no_license
|
moon-hotel/MachineLearningWithMe
|
496a7249c6098d1547e020fbed2e3fd17593741c
|
611289b3bfd5315368df205319363f3a753733d9
|
refs/heads/master
| 2023-08-30T12:46:57.789006
| 2023-08-24T15:02:27
| 2023-08-24T15:02:27
| 251,615,055
| 173
| 38
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
103_decision_boundary_in_linear_r.py
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.linear_model import LinearRegression
def make_data():
num_points = 200
    centers = [[1, 1], [2, 2]]  # specify the two cluster centers
x, y = make_blobs(n_samples=num_points, centers=centers,
cluster_std=0.2, random_state=np.random.seed(10))
index_pos, index_neg = (y == 1), (y == 0)
x_pos, x_neg = x[index_pos], x[index_neg]
plt.scatter(x_pos[:, 0], x_pos[:, 1], marker='s', c='r')
plt.scatter(x_neg[:, 0], x_neg[:, 1], marker='o', c='orange')
plt.xticks([])
plt.yticks([])
return x, y
def decision_boundary(x, y):
    ########### fit the model and make predictions
x, y = x[:, 0].reshape(-1, 1), x[:, 1]
model = LinearRegression()
model.fit(x, y)
y_pre = model.predict(x)
plt.plot(x, y_pre, c='r')
    plt.tight_layout()  # adjust subplot spacing
plt.show()
if __name__ == '__main__':
x, y = make_data()
decision_boundary(x, y)
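# --- Illustrative check (added; not part of the original script). A minimal sketch showing that
# the line drawn above is just the ordinary least-squares fit of the second feature on the first:
# np.polyfit's closed-form slope/intercept should match sklearn's LinearRegression coefficients.
def _check_closed_form_fit():
    x, _ = make_data()
    x1, x2 = x[:, 0], x[:, 1]
    slope, intercept = np.polyfit(x1, x2, deg=1)
    model = LinearRegression().fit(x1.reshape(-1, 1), x2)
    assert np.allclose([slope, intercept], [model.coef_[0], model.intercept_])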
|
055a07c02d0d497c185ee594ec36d29b1f224434
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/maps/azure-maps-geolocation/azure/maps/geolocation/_generated/models/__init__.py
|
06195698de8894c0cda1fb63553f9844de0e8801
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
__init__.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models import CountryRegion
from ._models import ErrorAdditionalInfo
from ._models import ErrorDetail
from ._models import ErrorResponse
from ._models import IpAddressToLocationResult
from ._enums import JsonFormat
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
'CountryRegion',
'ErrorAdditionalInfo',
'ErrorDetail',
'ErrorResponse',
'IpAddressToLocationResult',
'JsonFormat',
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
f46741a9e029c2a18077e581df6c11ba168467cc
|
e7e536df0263ae2a7ac44ef30f19110f891213a9
|
/src/pretalx/submission/models/tag.py
|
91884b5b91adc369b61b068e8a6688d27aee1efe
|
[
"Apache-2.0"
] |
permissive
|
pretalx/pretalx
|
b3b3808266f4810dfc8445dc1ed33ba398e7a9c2
|
269dce90a6fb1ce0064008c40ce5dd4dad61e2e3
|
refs/heads/main
| 2023-09-05T11:09:23.538325
| 2023-09-04T19:57:47
| 2023-09-04T19:57:47
| 83,081,285
| 563
| 195
|
Apache-2.0
| 2023-09-13T19:12:28
| 2017-02-24T20:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
tag.py
|
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_scopes import ScopedManager
from i18nfield.fields import I18nTextField
from pretalx.common.mixins.models import LogMixin
from pretalx.common.urls import EventUrls
class Tag(LogMixin, models.Model):
created = models.DateTimeField(null=True, auto_now_add=True)
event = models.ForeignKey(
to="event.Event", on_delete=models.PROTECT, related_name="tags"
)
tag = models.CharField(max_length=50)
description = I18nTextField(
verbose_name=_("Description"),
blank=True,
)
color = models.CharField(
max_length=7,
verbose_name=_("Color"),
validators=[
RegexValidator(r"#([0-9A-Fa-f]{3}){1,2}"),
],
)
public = models.BooleanField(
default=False,
verbose_name=_("Show tag publicly"),
help_text=_(
"Tags are currently only in use for organisers and reviewers. They will be visible publicly in a future release of pretalx."
),
)
objects = ScopedManager(event="event")
class urls(EventUrls):
base = edit = "{self.event.orga_urls.tags}{self.pk}/"
delete = "{base}delete"
def __str__(self) -> str:
return str(self.tag)
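# --- Illustrative ORM sketch (added; not part of pretalx). The event argument and tag values are
# hypothetical; it only shows how a Tag might be created inside an active django_scopes scope and
# read back via __str__.
def _create_demo_tag(event):
    from django_scopes import scope
    with scope(event=event):
        demo = Tag.objects.create(event=event, tag="keynote", color="#00ff00")
        return str(demo)  # -> "keynote"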
|
efe5ac38cdd2faf9ef528f3cc14a58da633abb96
|
4cccbd59c06b10f3bbe1d5e8c3082c8b0c9a8145
|
/edb/schema/globals.py
|
ea2b76cb849e227cd25db732c7fde6b65a17e671
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
edgedb/edgedb
|
ff26656ee449208b88ae85a6ad9823fce4f2ecad
|
4d614ce5de15e0b08575b0bf6738ece02c516ded
|
refs/heads/master
| 2023-09-05T07:10:05.409260
| 2023-09-01T23:20:13
| 2023-09-01T23:20:13
| 95,817,032
| 11,683
| 404
|
Apache-2.0
| 2023-09-14T17:25:49
| 2017-06-29T20:30:48
|
Python
|
UTF-8
|
Python
| false
| false
| 20,109
|
py
|
globals.py
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
from edb import errors
from edb.common import struct
from edb.edgeql import ast as qlast
from edb.edgeql import compiler as qlcompiler
from edb.edgeql import qltypes
from . import annos as s_anno
from . import delta as sd
from . import expr as s_expr
from . import expraliases as s_expraliases
from . import name as sn
from . import objects as so
from . import types as s_types
from . import utils
if TYPE_CHECKING:
from edb.schema import schema as s_schema
class Global(
so.QualifiedObject,
s_anno.AnnotationSubject,
qlkind=qltypes.SchemaObjectClass.GLOBAL,
data_safe=True,
):
target = so.SchemaField(
s_types.Type,
compcoef=0.85,
special_ddl_syntax=True,
)
required = so.SchemaField(
bool,
default=False,
compcoef=0.909,
special_ddl_syntax=True,
describe_visibility=(
so.DescribeVisibilityPolicy.SHOW_IF_EXPLICIT_OR_DERIVED
),
)
cardinality = so.SchemaField(
qltypes.SchemaCardinality,
default=qltypes.SchemaCardinality.One,
compcoef=0.833,
coerce=True,
special_ddl_syntax=True,
describe_visibility=(
so.DescribeVisibilityPolicy.SHOW_IF_EXPLICIT_OR_DERIVED
),
)
# Computable globals have this set to an expression
# defining them.
expr = so.SchemaField(
s_expr.Expression,
default=None,
coerce=True,
compcoef=0.909,
special_ddl_syntax=True,
)
default = so.SchemaField(
s_expr.Expression,
allow_ddl_set=True,
default=None,
coerce=True,
compcoef=0.909,
)
def is_computable(self, schema: s_schema.Schema) -> bool:
return bool(self.get_expr(schema))
def needs_present_arg(self, schema: s_schema.Schema) -> bool:
return bool(self.get_default(schema)) and not self.get_required(schema)
class GlobalCommandContext(
sd.ObjectCommandContext[so.Object],
s_anno.AnnotationSubjectCommandContext
):
pass
class GlobalCommand(
s_expraliases.AliasLikeCommand[Global],
context_class=GlobalCommandContext,
):
TYPE_FIELD_NAME = 'target'
@classmethod
def _get_alias_name(cls, type_name: sn.QualName) -> sn.QualName:
return cls._mangle_name(type_name)
@classmethod
def _is_alias(cls, obj: Global, schema: s_schema.Schema) -> bool:
return obj.is_computable(schema)
def _check_expr(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
expression = self.get_attribute_value('expr')
assert isinstance(expression, s_expr.Expression)
# If it's not compiled, don't worry about it. This should just
# be a dummy expression.
if not expression.irast:
return schema
required, card = expression.irast.cardinality.to_schema_value()
spec_required: Optional[bool] = (
self.get_specified_attribute_value('required', schema, context))
spec_card: Optional[qltypes.SchemaCardinality] = (
self.get_specified_attribute_value('cardinality', schema, context))
glob_name = self.get_verbosename()
if spec_required and not required:
srcctx = self.get_attribute_source_context('target')
raise errors.SchemaDefinitionError(
f'possibly an empty set returned by an '
f'expression for the computed '
f'{glob_name} '
f"explicitly declared as 'required'",
context=srcctx
)
if (
spec_card is qltypes.SchemaCardinality.One
and card is not qltypes.SchemaCardinality.One
):
srcctx = self.get_attribute_source_context('target')
raise errors.SchemaDefinitionError(
f'possibly more than one element returned by an '
f'expression for the computed '
f'{glob_name} '
f"explicitly declared as 'single'",
context=srcctx
)
if spec_card is None:
self.set_attribute_value('cardinality', card, computed=True)
if spec_required is None:
self.set_attribute_value('required', required, computed=True)
return schema
def canonicalize_attributes(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().canonicalize_attributes(schema, context)
if self.get_attribute_value('expr'):
schema = self._check_expr(schema, context)
schema = s_types.materialize_type_in_attribute(
schema, context, self, 'target')
return schema
def validate_object(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
scls = self.scls
is_computable = scls.is_computable(schema)
target = scls.get_target(schema)
if not is_computable:
if (
scls.get_required(schema)
and not scls.get_default(schema)
):
raise errors.SchemaDefinitionError(
"required globals must have a default",
context=self.source_context,
)
if scls.get_cardinality(schema) == qltypes.SchemaCardinality.Many:
raise errors.SchemaDefinitionError(
"non-computed globals may not be multi",
context=self.source_context,
)
if target.contains_object(schema):
raise errors.SchemaDefinitionError(
"non-computed globals may not have have object type",
context=self.source_context,
)
default_expr = scls.get_default(schema)
if default_expr is not None:
default_expr = default_expr.ensure_compiled(schema)
default_schema = default_expr.irast.schema
default_type = default_expr.irast.stype
source_context = self.get_attribute_source_context('default')
if is_computable:
raise errors.SchemaDefinitionError(
f'computed globals may not have default values',
context=source_context,
)
if not default_type.assignment_castable_to(target, default_schema):
raise errors.SchemaDefinitionError(
f'default expression is of invalid type: '
f'{default_type.get_displayname(default_schema)}, '
f'expected {target.get_displayname(schema)}',
context=source_context,
)
ptr_cardinality = scls.get_cardinality(schema)
default_required, default_cardinality = \
default_expr.irast.cardinality.to_schema_value()
if (ptr_cardinality is qltypes.SchemaCardinality.One
and default_cardinality != ptr_cardinality):
raise errors.SchemaDefinitionError(
f'possibly more than one element returned by '
f'the default expression for '
f'{scls.get_verbosename(schema)} declared as '
f"'single'",
context=source_context,
)
if scls.get_required(schema) and not default_required:
raise errors.SchemaDefinitionError(
f'possibly no elements returned by '
f'the default expression for '
f'{scls.get_verbosename(schema)} declared as '
f"'required'",
context=source_context,
)
if default_expr.irast.volatility.is_volatile():
raise errors.SchemaDefinitionError(
f'{scls.get_verbosename(schema)} has a volatile '
f'default expression, which is not allowed',
context=source_context,
)
def get_dummy_expr_field_value(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
field: so.Field[Any],
value: Any,
) -> Optional[s_expr.Expression]:
if field.name in ('expr', 'default'):
rt = self.scls.get_target(schema)
if isinstance(rt, so.DerivableInheritingObject):
rt = rt.get_nearest_non_derived_parent(schema)
text = f'SELECT assert_exists(<{rt.get_displayname(schema)}>{{}})'
return s_expr.Expression(text=text)
else:
raise NotImplementedError(f'unhandled field {field.name!r}')
def compile_expr_field(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
field: so.Field[Any],
value: s_expr.Expression,
track_schema_ref_exprs: bool=False,
) -> s_expr.CompiledExpression:
if field.name in {'default', 'expr'}:
ptr_name = self.get_verbosename()
in_ddl_context_name = None
if field.name == 'expr':
in_ddl_context_name = f'computed {ptr_name}'
return value.compiled(
schema=schema,
options=qlcompiler.CompilerOptions(
modaliases=context.modaliases,
schema_object_context=self.get_schema_metaclass(),
apply_query_rewrites=not context.stdmode,
track_schema_ref_exprs=track_schema_ref_exprs,
in_ddl_context_name=in_ddl_context_name,
),
)
else:
return super().compile_expr_field(
schema, context, field, value, track_schema_ref_exprs)
class CreateGlobal(
s_expraliases.CreateAliasLike[Global],
GlobalCommand,
):
astnode = qlast.CreateGlobal
def get_ast_attr_for_field(
self,
field: str,
astnode: Type[qlast.DDLOperation],
) -> Optional[str]:
if (
field == 'required'
and issubclass(astnode, qlast.CreateGlobal)
):
return 'is_required'
elif (
field == 'cardinality'
and issubclass(astnode, qlast.CreateGlobal)
):
return 'cardinality'
else:
return super().get_ast_attr_for_field(field, astnode)
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
node: qlast.DDLOperation,
op: sd.AlterObjectProperty,
) -> None:
assert isinstance(node, qlast.CreateGlobal)
if op.property == 'target':
if not node.target:
expr = self.get_attribute_value('expr')
if expr is not None:
node.target = expr.qlast
else:
t = op.new_value
assert isinstance(t, (so.Object, so.ObjectShell))
node.target = utils.typeref_to_ast(schema, t)
else:
super()._apply_field_ast(schema, context, node, op)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(astnode, qlast.CreateGlobal)
assert isinstance(cmd, GlobalCommand)
if astnode.is_required is not None:
cmd.set_attribute_value(
'required',
astnode.is_required,
source_context=astnode.context,
)
if astnode.cardinality is not None:
cmd.set_attribute_value(
'cardinality',
astnode.cardinality,
source_context=astnode.context,
)
assert astnode.target is not None
if isinstance(astnode.target, qlast.TypeExpr):
type_ref = utils.ast_to_type_shell(
astnode.target,
metaclass=s_types.Type,
modaliases=context.modaliases,
schema=schema,
)
cmd.set_attribute_value(
'target',
type_ref,
source_context=astnode.target.context,
)
else:
# computable
qlcompiler.normalize(
astnode.target,
schema=schema,
modaliases=context.modaliases
)
cmd.set_attribute_value(
'expr',
s_expr.Expression.from_ast(
astnode.target, schema, context.modaliases,
context.localnames,
),
)
if (
cmd.has_attribute_value('expr')
and cmd.has_attribute_value('target')
):
raise errors.UnsupportedFeatureError(
"cannot specify a type and an expression for a global",
context=astnode.context,
)
return cmd
class RenameGlobal(
s_expraliases.RenameAliasLike[Global],
GlobalCommand,
):
pass
class AlterGlobal(
s_expraliases.AlterAliasLike[Global],
GlobalCommand,
):
astnode = qlast.AlterGlobal
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
if not context.canonical:
old_expr = self.scls.get_expr(schema)
has_expr = self.has_attribute_value('expr')
clears_expr = has_expr and not self.get_attribute_value('expr')
# Force reconsideration of the expression if cardinality
# or required is changed.
if (
(
self.has_attribute_value('cardinality')
or self.has_attribute_value('required')
)
and not has_expr
and old_expr
):
self.set_attribute_value(
'expr',
s_expr.Expression.not_compiled(old_expr)
)
# Produce an error when setting a type on something with
# an expression
if (
self.get_attribute_value('target')
and (
(self.scls.get_expr(schema) or has_expr)
and not clears_expr
)
):
raise errors.UnsupportedFeatureError(
"cannot specify a type and an expression for a global",
context=self.source_context,
)
if clears_expr and old_expr:
# If the expression was explicitly set to None,
# that means that `RESET EXPRESSION` was executed
# and this is no longer a computable.
computed_fields = self.scls.get_computed_fields(schema)
if (
'required' in computed_fields
and not self.has_attribute_value('required')
):
self.set_attribute_value('required', None)
if (
'cardinality' in computed_fields
and not self.has_attribute_value('cardinality')
):
self.set_attribute_value('cardinality', None)
return super()._alter_begin(schema, context)
class SetGlobalType(
sd.AlterSpecialObjectField[Global],
field='target',
):
cast_expr = struct.Field(s_expr.Expression, default=None)
reset_value = struct.Field(bool, default=False)
def get_verb(self) -> str:
return 'alter the type of'
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = super()._alter_begin(schema, context)
scls = self.scls
orig_target = scls.get_explicit_field_value(
orig_schema, 'target', None)
new_target = scls.get_target(schema)
if not orig_target or orig_target == new_target:
return schema
if not context.canonical:
if self.cast_expr:
raise errors.UnsupportedFeatureError(
f'USING casts for SET TYPE on globals are not supported',
hint='Use RESET TO DEFAULT instead',
context=self.source_context,
)
if not self.reset_value:
raise errors.SchemaDefinitionError(
f"SET TYPE on global must explicitly reset the "
f"global's value",
hint='Use RESET TO DEFAULT after the type',
context=self.source_context,
)
return schema
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(cmd, SetGlobalType)
if (
isinstance(astnode, qlast.SetGlobalType)
and astnode.cast_expr is not None
):
cmd.cast_expr = s_expr.Expression.from_ast(
astnode.cast_expr,
schema,
context.modaliases,
context.localnames,
)
if isinstance(astnode, qlast.SetGlobalType):
cmd.reset_value = astnode.reset_value
return cmd
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
set_field = super()._get_ast(schema, context, parent_node=parent_node)
if set_field is None or self.is_attribute_computed('target'):
return None
else:
assert isinstance(set_field, qlast.SetField)
assert not isinstance(set_field.value, qlast.Expr)
return qlast.SetGlobalType(
value=set_field.value,
cast_expr=(
self.cast_expr.qlast
if self.cast_expr is not None else None
),
reset_value=self.reset_value,
)
def record_diff_annotations(
self, *,
schema: s_schema.Schema,
orig_schema: Optional[s_schema.Schema],
context: so.ComparisonContext,
object: Optional[so.Object],
orig_object: Optional[so.Object],
) -> None:
super().record_diff_annotations(
schema=schema,
orig_schema=orig_schema,
context=context,
orig_object=orig_object,
object=object,
)
if orig_schema is None:
return
if (
not self.get_orig_attribute_value('expr')
and not self.get_attribute_value('expr')
):
self.reset_value = True
class DeleteGlobal(
s_expraliases.DeleteAliasLike[Global],
GlobalCommand,
):
astnode = qlast.DropGlobal
|
7117c32051f42fa4d4ac6a1960ddaca0ac134c85
|
0e0ddc095823c54877c143adacbfcdd6355261de
|
/libqtile/layout/ratiotile.py
|
e3e69887dcad4e2fc225abd1117ef40689719978
|
[
"MIT"
] |
permissive
|
qtile/qtile
|
b19108ca632871104a0783a4afbe7350a17b97db
|
3f8a00082ad880042d396477d9445954e8d29cf2
|
refs/heads/master
| 2023-09-01T19:31:09.419767
| 2023-09-01T19:10:00
| 2023-09-01T19:10:00
| 47,476
| 4,203
| 986
|
MIT
| 2023-09-11T21:21:56
| 2008-08-30T00:16:40
|
Python
|
UTF-8
|
Python
| false
| false
| 11,268
|
py
|
ratiotile.py
|
# -*- coding:utf-8 -*-
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012-2013, 2015 Tycho Andersen
# Copyright (c) 2013 Björn Lindström
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dmpayton
# Copyright (c) 2014 dequis
# Copyright (c) 2017 Dirk Hartmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from libqtile.command.base import expose_command
from libqtile.layout.base import _SimpleLayoutBase
ROWCOL = 1 # do rows at a time left to right top down
COLROW = 2 # do cols top to bottom, left to right
GOLDEN_RATIO = 1.618
class GridInfo:
"""
Calculates sizes for grids
>>> gi = GridInfo(.5, 5, 600, 480)
    >>> gi.calc(5, 600, 480)
    (1, 5, 1)
    >>> gi.get_sizes(600, 480)
    [(0, 0, 120, 480), (120, 0, 120, 480), (240, 0, 120, 480), (360, 0, 120, 480), (480, 0, 120, 480)]
    >>> gi = GridInfo(6, 5, 600, 480)
    >>> gi.get_sizes(600, 480)
    [(0, 0, 600, 96), (0, 96, 600, 96), (0, 192, 600, 96), (0, 288, 600, 96), (0, 384, 600, 96)]
    >>> gi = GridInfo(1, 5, 600, 480)
    >>> gi.get_sizes(600, 480)
    [(0, 0, 200, 240), (200, 0, 200, 240), (400, 0, 200, 240), (0, 240, 300, 240), (300, 240, 300, 240)]
    >>> foo = GridInfo(1.6, 7, 400, 370)
    >>> foo.get_sizes(500, 580)
    [(0, 0, 250, 145), (0, 145, 250, 145), (0, 290, 250, 145), (0, 435, 250, 145), (250, 0, 250, 193), (250, 193, 250, 193), (250, 386, 250, 194)]
"""
def __init__(self, ratio, num_windows, width, height):
self.ratio = ratio
self.num_windows = num_windows
self.width = width
self.height = height
self.num_rows = 0
self.num_cols = 0
def calc(self, num_windows, width, height):
"""returns (rows, cols, orientation) tuple given input"""
best_ratio = None
best_rows_cols_orientation = None
for rows, cols, orientation in self._possible_grids(num_windows):
sample_width = width / cols
sample_height = height / rows
sample_ratio = sample_width / sample_height
diff = abs(sample_ratio - self.ratio)
if best_ratio is None or diff < best_ratio:
best_ratio = diff
best_rows_cols_orientation = (rows, cols, orientation)
return best_rows_cols_orientation
def _possible_grids(self, num_windows):
"""
iterates over possible grids given a number of windows
"""
if num_windows < 2:
end = 2
else:
end = num_windows // 2 + 1
for rows in range(1, end):
cols = int(math.ceil(num_windows / rows))
yield (rows, cols, ROWCOL)
if rows != cols:
# also want the reverse test
yield (cols, rows, COLROW)
def get_sizes_advanced(self, total_width, total_height, xoffset=0, yoffset=0):
"""after every row/column recalculate remaining area"""
results = []
width = total_width
height = total_height
while len(results) < self.num_windows:
remaining = self.num_windows - len(results)
orien, sizes = self._get_row_or_col(remaining, width, height, xoffset, yoffset)
results.extend(sizes)
if orien == ROWCOL:
# adjust height/yoffset
height -= sizes[-1][-1]
yoffset += sizes[-1][-1]
else:
width -= sizes[-1][-2]
xoffset += sizes[-1][-2]
return results
def _get_row_or_col(self, num_windows, width, height, xoffset, yoffset):
"""process one row (or col) at a time"""
rows, cols, orientation = self.calc(num_windows, width, height)
results = []
if orientation == ROWCOL:
x = 0
y = 0
for i, col in enumerate(range(cols)):
w_width = width // cols
w_height = height // rows
if i == cols - 1:
w_width = width - x
results.append((x + xoffset, y + yoffset, w_width, w_height))
x += w_width
elif orientation == COLROW:
x = 0
y = 0
for i, col in enumerate(range(rows)):
w_width = width // cols
w_height = height // rows
if i == rows - 1:
w_height = height - y
results.append((x + xoffset, y + yoffset, w_width, w_height))
y += w_height
return orientation, results
def get_sizes(self, total_width, total_height, xoffset=0, yoffset=0):
width = 0
height = 0
results = []
rows, cols, orientation = self.calc(self.num_windows, total_width, total_height)
if orientation == ROWCOL:
y = 0
for i, row in enumerate(range(rows)):
x = 0
width = total_width // cols
for j, col in enumerate(range(cols)):
height = total_height // rows
if i == rows - 1 and j == 0:
# last row
remaining = self.num_windows - len(results)
width = total_width // remaining
elif j == cols - 1 or len(results) + 1 == self.num_windows:
# since we are dealing with integers,
# make last column (or item) take up remaining space
width = total_width - x
results.append((x + xoffset, y + yoffset, width, height))
if len(results) == self.num_windows:
return results
x += width
y += height
else:
x = 0
for i, col in enumerate(range(cols)):
y = 0
height = total_height // rows
for j, row in enumerate(range(rows)):
width = total_width // cols
# down first
if i == cols - 1 and j == 0:
remaining = self.num_windows - len(results)
height = total_height // remaining
elif j == rows - 1 or len(results) + 1 == self.num_windows:
height = total_height - y
results.append(
(
x + xoffset, # i * width + xoffset,
y + yoffset, # j * height + yoffset,
width,
height,
)
)
if len(results) == self.num_windows:
return results
y += height
x += width
return results
class RatioTile(_SimpleLayoutBase):
"""Tries to tile all windows in the width/height ratio passed in"""
defaults = [
("border_focus", "#0000ff", "Border colour(s) for the focused window."),
("border_normal", "#000000", "Border colour(s) for un-focused windows."),
("border_width", 1, "Border width."),
("margin", 0, "Margin of the layout (int or list of ints [N E S W])"),
("ratio", GOLDEN_RATIO, "Ratio of the tiles"),
("ratio_increment", 0.1, "Amount to increment per ratio increment"),
("fancy", False, "Use a different method to calculate window sizes."),
]
def __init__(self, **config):
_SimpleLayoutBase.__init__(self, **config)
self.add_defaults(RatioTile.defaults)
self.dirty = True # need to recalculate
self.layout_info = []
self.last_size = None
self.last_screen = None
def clone(self, group):
return _SimpleLayoutBase.clone(self, group)
def add_client(self, w):
self.dirty = True
self.clients.append_head(w)
def remove(self, w):
self.dirty = True
return _SimpleLayoutBase.remove(self, w)
def configure(self, win, screen):
# force recalc
if not self.last_screen or self.last_screen != screen:
self.last_screen = screen
self.dirty = True
if self.last_size and not self.dirty:
if screen.width != self.last_size[0] or screen.height != self.last_size[1]:
self.dirty = True
if self.dirty:
gi = GridInfo(self.ratio, len(self.clients), screen.width, screen.height)
self.last_size = (screen.width, screen.height)
if self.fancy:
method = gi.get_sizes_advanced
else:
method = gi.get_sizes
self.layout_info = method(screen.width, screen.height, screen.x, screen.y)
self.dirty = False
try:
idx = self.clients.index(win)
except ValueError:
win.hide()
return
x, y, w, h = self.layout_info[idx]
if win.has_focus:
bc = self.border_focus
else:
bc = self.border_normal
win.place(
x,
y,
w - self.border_width * 2,
h - self.border_width * 2,
self.border_width,
bc,
margin=self.margin,
)
win.unhide()
@expose_command()
def info(self):
d = _SimpleLayoutBase.info(self)
focused = self.clients.current_client
d["ratio"] = self.ratio
d["focused"] = focused.name if focused else None
d["layout_info"] = self.layout_info
return d
@expose_command("down")
def previous(self):
_SimpleLayoutBase.previous(self)
@expose_command("up")
def next(self):
_SimpleLayoutBase.next(self)
@expose_command()
def shuffle_down(self):
if self.clients:
self.clients.rotate_up()
self.group.layout_all()
@expose_command()
def shuffle_up(self):
if self.clients:
self.clients.rotate_down()
self.group.layout_all()
@expose_command()
def decrease_ratio(self):
new_ratio = self.ratio - self.ratio_increment
if new_ratio < 0:
return
self.ratio = new_ratio
self.group.layout_all()
@expose_command()
def increase_ratio(self):
self.ratio += self.ratio_increment
self.group.layout_all()
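# --- Illustrative sketch (added; not part of qtile). It exercises GridInfo directly with the
# explicit arguments its methods expect, mirroring the docstring example above: five windows on a
# 600x480 area with a target width/height ratio of 0.5 tile as one row of five columns.
if __name__ == "__main__":
    demo = GridInfo(0.5, 5, 600, 480)
    print(demo.calc(5, 600, 480))      # -> (1, 5, 1), i.e. 1 row, 5 columns, ROWCOL
    for rect in demo.get_sizes(600, 480):
        print(rect)                    # (x, y, width, height) for each window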
|
afd6b21a8453b329a97c442227319c5054caaea5
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/wt_event/wt_bonus_model.py
|
7ad8cf157eed13316b5c13b9d194ee385260bcbe
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
wt_bonus_model.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/wt_event/wt_bonus_model.py
from enum import Enum
from gui.impl.gen.view_models.common.missions.bonuses.icon_bonus_model import IconBonusModel
class TypeIcon(Enum):
PROJECTIONDECAL = 'projectionDecal'
STYLE = 'style'
DECAL = 'decal'
CAMOUFLAGE = 'camouflage'
EMBLEM = 'emblem'
INSCRIPTION = 'inscription'
class WtBonusModel(IconBonusModel):
__slots__ = ()
def __init__(self, properties=10, commands=0):
super(WtBonusModel, self).__init__(properties=properties, commands=commands)
def getSpecialId(self):
return self._getNumber(8)
def setSpecialId(self, value):
self._setNumber(8, value)
def getTypeIcon(self):
return TypeIcon(self._getString(9))
def setTypeIcon(self, value):
self._setString(9, value.value)
def _initialize(self):
super(WtBonusModel, self)._initialize()
self._addNumberProperty('specialId', 0)
self._addStringProperty('typeIcon')
|
633bec6754dd9f63cc9861f9b75454828f1cbe8e
|
07df6279388a17192eb4e4e417383a1f56208839
|
/mmdet3d/models/segmentors/encoder_decoder.py
|
1a4fee935714629cbfdc5115857e45e07d9376f8
|
[
"Apache-2.0"
] |
permissive
|
HuangJunJie2017/BEVDet
|
11d4ca45286739c9bd099f715cb0edc9408a914f
|
f71858d02eb0fbd09860150ade67558d7984b1be
|
refs/heads/dev2.1
| 2023-05-23T15:35:45.216750
| 2023-05-07T16:35:04
| 2023-05-07T16:35:04
| 432,979,408
| 985
| 192
|
Apache-2.0
| 2023-04-28T15:06:51
| 2021-11-29T09:28:12
|
Python
|
UTF-8
|
Python
| false
| false
| 19,008
|
py
|
encoder_decoder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from mmseg.core import add_prefix
from ..builder import (SEGMENTORS, build_backbone, build_head, build_loss,
build_neck)
from .base import Base3DSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder3D(Base3DSegmentor):
"""3D Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
    which can be discarded during inference.
"""
def __init__(self,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
loss_regularization=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(EncoderDecoder3D, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
self._init_decode_head(decode_head)
self._init_auxiliary_head(auxiliary_head)
self._init_loss_regularization(loss_regularization)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
assert self.with_decode_head, \
'3D EncoderDecoder Segmentor should have a decode_head'
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = build_head(decode_head)
self.num_classes = self.decode_head.num_classes
def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(build_head(head_cfg))
else:
self.auxiliary_head = build_head(auxiliary_head)
def _init_loss_regularization(self, loss_regularization):
"""Initialize ``loss_regularization``"""
if loss_regularization is not None:
if isinstance(loss_regularization, list):
self.loss_regularization = nn.ModuleList()
for loss_cfg in loss_regularization:
self.loss_regularization.append(build_loss(loss_cfg))
else:
self.loss_regularization = build_loss(loss_regularization)
def extract_feat(self, points):
"""Extract features from points."""
x = self.backbone(points)
if self.with_neck:
x = self.neck(x)
return x
def encode_decode(self, points, img_metas):
"""Encode points with backbone and decode into a semantic segmentation
map of the same size as input.
Args:
points (torch.Tensor): Input points of shape [B, N, 3+C].
img_metas (list[dict]): Meta information of each sample.
Returns:
torch.Tensor: Segmentation logits of shape [B, num_classes, N].
"""
x = self.extract_feat(points)
out = self._decode_head_forward_test(x, img_metas)
return out
def _decode_head_forward_train(self, x, img_metas, pts_semantic_mask):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
pts_semantic_mask,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in
inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, pts_semantic_mask):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
pts_semantic_mask,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, pts_semantic_mask, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
def _loss_regularization_forward_train(self):
"""Calculate regularization loss for model weight in training."""
losses = dict()
if isinstance(self.loss_regularization, nn.ModuleList):
for idx, regularize_loss in enumerate(self.loss_regularization):
loss_regularize = dict(
loss_regularize=regularize_loss(self.modules()))
losses.update(add_prefix(loss_regularize, f'regularize_{idx}'))
else:
loss_regularize = dict(
loss_regularize=self.loss_regularization(self.modules()))
losses.update(add_prefix(loss_regularize, 'regularize'))
return losses
def forward_dummy(self, points):
"""Dummy forward function."""
seg_logit = self.encode_decode(points, None)
return seg_logit
def forward_train(self, points, img_metas, pts_semantic_mask):
"""Forward function for training.
Args:
points (list[torch.Tensor]): List of points of shape [N, C].
img_metas (list): Image metas.
pts_semantic_mask (list[torch.Tensor]): List of point-wise semantic
labels of shape [N].
Returns:
dict[str, Tensor]: Losses.
"""
points_cat = torch.stack(points)
pts_semantic_mask_cat = torch.stack(pts_semantic_mask)
# extract features using backbone
x = self.extract_feat(points_cat)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
pts_semantic_mask_cat)
losses.update(loss_decode)
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, pts_semantic_mask_cat)
losses.update(loss_aux)
if self.with_regularization_loss:
loss_regularize = self._loss_regularization_forward_train()
losses.update(loss_regularize)
return losses
@staticmethod
def _input_generation(coords,
patch_center,
coord_max,
feats,
use_normalized_coord=False):
"""Generating model input.
Generate input by subtracting patch center and adding additional
features. Currently support colors and normalized xyz as features.
Args:
coords (torch.Tensor): Sampled 3D point coordinate of shape [S, 3].
patch_center (torch.Tensor): Center coordinate of the patch.
coord_max (torch.Tensor): Max coordinate of all 3D points.
feats (torch.Tensor): Features of sampled points of shape [S, C].
use_normalized_coord (bool, optional): Whether to use normalized
xyz as additional features. Defaults to False.
Returns:
torch.Tensor: The generated input data of shape [S, 3+C'].
"""
# subtract patch center, the z dimension is not centered
centered_coords = coords.clone()
centered_coords[:, 0] -= patch_center[0]
centered_coords[:, 1] -= patch_center[1]
# normalized coordinates as extra features
if use_normalized_coord:
normalized_coord = coords / coord_max
feats = torch.cat([feats, normalized_coord], dim=1)
points = torch.cat([centered_coords, feats], dim=1)
return points
def _sliding_patch_generation(self,
points,
num_points,
block_size,
sample_rate=0.5,
use_normalized_coord=False,
eps=1e-3):
"""Sampling points in a sliding window fashion.
First sample patches to cover all the input points.
Then sample points in each patch to batch points of a certain number.
Args:
points (torch.Tensor): Input points of shape [N, 3+C].
num_points (int): Number of points to be sampled in each patch.
block_size (float, optional): Size of a patch to sample.
sample_rate (float, optional): Stride used in sliding patch.
Defaults to 0.5.
use_normalized_coord (bool, optional): Whether to use normalized
xyz as additional features. Defaults to False.
eps (float, optional): A value added to patch boundary to guarantee
points coverage. Defaults to 1e-3.
Returns:
np.ndarray | np.ndarray:
- patch_points (torch.Tensor): Points of different patches of
shape [K, N, 3+C].
- patch_idxs (torch.Tensor): Index of each point in
`patch_points`, of shape [K, N].
"""
device = points.device
# we assume the first three dims are points' 3D coordinates
# and the rest dims are their per-point features
coords = points[:, :3]
feats = points[:, 3:]
coord_max = coords.max(0)[0]
coord_min = coords.min(0)[0]
stride = block_size * sample_rate
num_grid_x = int(
torch.ceil((coord_max[0] - coord_min[0] - block_size) /
stride).item() + 1)
num_grid_y = int(
torch.ceil((coord_max[1] - coord_min[1] - block_size) /
stride).item() + 1)
patch_points, patch_idxs = [], []
for idx_y in range(num_grid_y):
s_y = coord_min[1] + idx_y * stride
e_y = torch.min(s_y + block_size, coord_max[1])
s_y = e_y - block_size
for idx_x in range(num_grid_x):
s_x = coord_min[0] + idx_x * stride
e_x = torch.min(s_x + block_size, coord_max[0])
s_x = e_x - block_size
# extract points within this patch
cur_min = torch.tensor([s_x, s_y, coord_min[2]]).to(device)
cur_max = torch.tensor([e_x, e_y, coord_max[2]]).to(device)
cur_choice = ((coords >= cur_min - eps) &
(coords <= cur_max + eps)).all(dim=1)
if not cur_choice.any(): # no points in this patch
continue
# sample points in this patch to multiple batches
cur_center = cur_min + block_size / 2.0
point_idxs = torch.nonzero(cur_choice, as_tuple=True)[0]
num_batch = int(np.ceil(point_idxs.shape[0] / num_points))
point_size = int(num_batch * num_points)
replace = point_size > 2 * point_idxs.shape[0]
num_repeat = point_size - point_idxs.shape[0]
if replace: # duplicate
point_idxs_repeat = point_idxs[torch.randint(
0, point_idxs.shape[0],
size=(num_repeat, )).to(device)]
else:
point_idxs_repeat = point_idxs[torch.randperm(
point_idxs.shape[0])[:num_repeat]]
choices = torch.cat([point_idxs, point_idxs_repeat], dim=0)
choices = choices[torch.randperm(choices.shape[0])]
# construct model input
point_batches = self._input_generation(
coords[choices],
cur_center,
coord_max,
feats[choices],
use_normalized_coord=use_normalized_coord)
patch_points.append(point_batches)
patch_idxs.append(choices)
patch_points = torch.cat(patch_points, dim=0)
patch_idxs = torch.cat(patch_idxs, dim=0)
# make sure all points are sampled at least once
assert torch.unique(patch_idxs).shape[0] == points.shape[0], \
'some points are not sampled in sliding inference'
return patch_points, patch_idxs
def slide_inference(self, point, img_meta, rescale):
"""Inference by sliding-window with overlap.
Args:
point (torch.Tensor): Input points of shape [N, 3+C].
img_meta (dict): Meta information of input sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Returns:
Tensor: The output segmentation map of shape [num_classes, N].
"""
num_points = self.test_cfg.num_points
block_size = self.test_cfg.block_size
sample_rate = self.test_cfg.sample_rate
use_normalized_coord = self.test_cfg.use_normalized_coord
batch_size = self.test_cfg.batch_size * num_points
# patch_points is of shape [K*N, 3+C], patch_idxs is of shape [K*N]
patch_points, patch_idxs = self._sliding_patch_generation(
point, num_points, block_size, sample_rate, use_normalized_coord)
feats_dim = patch_points.shape[1]
seg_logits = [] # save patch predictions
for batch_idx in range(0, patch_points.shape[0], batch_size):
batch_points = patch_points[batch_idx:batch_idx + batch_size]
batch_points = batch_points.view(-1, num_points, feats_dim)
# batch_seg_logit is of shape [B, num_classes, N]
batch_seg_logit = self.encode_decode(batch_points, img_meta)
batch_seg_logit = batch_seg_logit.transpose(1, 2).contiguous()
seg_logits.append(batch_seg_logit.view(-1, self.num_classes))
# aggregate per-point logits by indexing sum and dividing count
seg_logits = torch.cat(seg_logits, dim=0) # [K*N, num_classes]
expand_patch_idxs = patch_idxs.unsqueeze(1).repeat(1, self.num_classes)
preds = point.new_zeros((point.shape[0], self.num_classes)).\
scatter_add_(dim=0, index=expand_patch_idxs, src=seg_logits)
count_mat = torch.bincount(patch_idxs)
preds = preds / count_mat[:, None]
# TODO: if rescale and voxelization segmentor
return preds.transpose(0, 1) # to [num_classes, K*N]
def whole_inference(self, points, img_metas, rescale):
"""Inference with full scene (one forward pass without sliding)."""
seg_logit = self.encode_decode(points, img_metas)
# TODO: if rescale and voxelization segmentor
return seg_logit
def inference(self, points, img_metas, rescale):
"""Inference with slide/whole style.
Args:
points (torch.Tensor): Input points of shape [B, N, 3+C].
img_metas (list[dict]): Meta information of each sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Returns:
Tensor: The output segmentation map.
"""
assert self.test_cfg.mode in ['slide', 'whole']
if self.test_cfg.mode == 'slide':
seg_logit = torch.stack([
self.slide_inference(point, img_meta, rescale)
for point, img_meta in zip(points, img_metas)
], 0)
else:
seg_logit = self.whole_inference(points, img_metas, rescale)
output = F.softmax(seg_logit, dim=1)
return output
def simple_test(self, points, img_metas, rescale=True):
"""Simple test with single scene.
Args:
points (list[torch.Tensor]): List of points of shape [N, 3+C].
img_metas (list[dict]): Meta information of each sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Defaults to True.
Returns:
list[dict]: The output prediction result with following keys:
- semantic_mask (Tensor): Segmentation mask of shape [N].
"""
        # 3D segmentation requires per-point prediction, so it's impossible
        # to use down-sampling to get a batch of scenes with the same num_points;
        # therefore, we only support testing one scene at a time
seg_pred = []
for point, img_meta in zip(points, img_metas):
seg_prob = self.inference(point.unsqueeze(0), [img_meta],
rescale)[0]
seg_map = seg_prob.argmax(0) # [N]
# to cpu tensor for consistency with det3d
seg_map = seg_map.cpu()
seg_pred.append(seg_map)
# warp in dict
seg_pred = [dict(semantic_mask=seg_map) for seg_map in seg_pred]
return seg_pred
def aug_test(self, points, img_metas, rescale=True):
"""Test with augmentations.
Args:
points (list[torch.Tensor]): List of points of shape [B, N, 3+C].
img_metas (list[list[dict]]): Meta information of each sample.
Outer list are different samples while inner is different augs.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Defaults to True.
Returns:
list[dict]: The output prediction result with following keys:
- semantic_mask (Tensor): Segmentation mask of shape [N].
"""
        # in aug_test, one scene going through different augmentations can
        # have the same number of points, so the augmented copies are stacked as a batch;
        # to save memory, we aggregate the augmented seg logits in place
seg_pred = []
for point, img_meta in zip(points, img_metas):
seg_prob = self.inference(point, img_meta, rescale)
seg_prob = seg_prob.mean(0) # [num_classes, N]
seg_map = seg_prob.argmax(0) # [N]
# to cpu tensor for consistency with det3d
seg_map = seg_map.cpu()
seg_pred.append(seg_map)
# warp in dict
seg_pred = [dict(semantic_mask=seg_map) for seg_map in seg_pred]
return seg_pred
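# --- Illustrative sketch (added; not part of mmdet3d). Standalone arithmetic mirroring the
# sliding-window counts in `_sliding_patch_generation` above: for an axis extent of 10.0 m, a
# block_size of 1.5 m and sample_rate 0.5, the stride is 0.75 m and 13 patch positions are needed.
def _demo_patch_grid(extent=10.0, block_size=1.5, sample_rate=0.5):
    import math
    stride = block_size * sample_rate
    return int(math.ceil((extent - block_size) / stride)) + 1  # -> 13 for the defaults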
|
92bfb40c4765478295dad368c91fce4a77dc410b
|
59f64b5cf799e31c97b11828dba4787afb8f3f17
|
/datasets/load/load.1000_Genomes_phase3_chrY.GRCh38.py
|
fdfaedcd324b29bf72c2f26874319bdabe2fee6d
|
[
"MIT"
] |
permissive
|
hail-is/hail
|
2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1
|
07a483ae0f46c66f3ed6fd265b48f48c06298f98
|
refs/heads/main
| 2023-09-01T15:03:01.450365
| 2023-09-01T02:46:35
| 2023-09-01T02:46:35
| 45,069,467
| 913
| 262
|
MIT
| 2023-09-14T21:53:32
| 2015-10-27T20:55:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,905
|
py
|
load.1000_Genomes_phase3_chrY.GRCh38.py
|
import hail as hl
ht_samples = hl.read_table('gs://hail-datasets-hail-data/1000_Genomes_phase3_samples.ht')
ht_relationships = hl.read_table('gs://hail-datasets-hail-data/1000_Genomes_phase3_sample_relationships.ht')
mt = hl.import_vcf(
'gs://hail-datasets-raw-data/1000_Genomes/1000_Genomes_phase3_chrY_GRCh38.vcf.bgz',
reference_genome='GRCh38',
contig_recoding={'Y': 'chrY'})
mt = mt.annotate_cols(**ht_samples[mt.s])
mt = mt.annotate_cols(**ht_relationships[mt.s])
mt_split = hl.split_multi(mt)
mt_split = mt_split.select_entries(GT=hl.downcode(mt_split.GT, mt_split.a_index))
mt_split = mt_split.annotate_rows(
info=hl.struct(
DP=mt_split.info.DP,
END=mt_split.info.END,
SVTYPE=mt_split.info.SVTYPE,
AA=mt_split.info.AA,
AC=mt_split.info.AC[mt_split.a_index - 1],
AF=mt_split.info.AF[mt_split.a_index - 1],
NS=mt_split.info.NS,
AN=mt_split.info.AN,
EAS_AF=mt_split.info.EAS_AF[mt_split.a_index - 1],
EUR_AF=mt_split.info.EUR_AF[mt_split.a_index - 1],
AFR_AF=mt_split.info.AFR_AF[mt_split.a_index - 1],
AMR_AF=mt_split.info.AMR_AF[mt_split.a_index - 1],
SAS_AF=mt_split.info.SAS_AF[mt_split.a_index - 1],
VT=(hl.case()
.when((mt_split.alleles[0].length() == 1) & (mt_split.alleles[1].length() == 1), 'SNP')
.when(mt_split.alleles[0].matches('<CN*>') | mt_split.alleles[1].matches('<CN*>'), 'SV')
.default('INDEL')),
EX_TARGET=mt_split.info.EX_TARGET,
MULTI_ALLELIC=mt_split.info.MULTI_ALLELIC,
STRAND_FLIP=mt_split.info.STRAND_FLIP,
REF_SWITCH=mt_split.info.REF_SWITCH,
DEPRECATED_RSID=mt_split.info.DEPRECATED_RSID[mt_split.a_index - 1],
RSID_REMOVED=mt_split.info.RSID_REMOVED[mt_split.a_index - 1],
GRCH37_38_REF_STRING_MATCH=mt_split.info.GRCH37_38_REF_STRING_MATCH,
NOT_ALL_RSIDS_STRAND_CHANGE_OR_REF_SWITCH=mt_split.info.NOT_ALL_RSIDS_STRAND_CHANGE_OR_REF_SWITCH,
GRCH37_POS=mt_split.info.GRCH37_POS,
GRCH37_REF=mt_split.info.GRCH37_REF,
ALLELE_TRANSFORM=mt_split.info.ALLELE_TRANSFORM,
REF_NEW_ALLELE=mt_split.info.REF_NEW_ALLELE,
CHROM_CHANGE_BETWEEN_ASSEMBLIES=mt_split.info.CHROM_CHANGE_BETWEEN_ASSEMBLIES[mt_split.a_index - 1]))
n_rows, n_cols = mt_split.count()
n_partitions = mt_split.n_partitions()
mt_split = hl.sample_qc(mt_split)
mt_split = hl.variant_qc(mt_split)
mt_split = mt_split.annotate_globals(
metadata=hl.struct(
name='1000_Genomes_phase3_chrY',
reference_genome='GRCh38',
n_rows=n_rows,
n_cols=n_cols,
n_partitions=n_partitions))
mt_split.write('gs://hail-datasets-hail-data/1000_Genomes_phase3_chrY.GRCh38.mt', overwrite=True)
mt = hl.read_matrix_table('gs://hail-datasets-hail-data/1000_Genomes_phase3_chrY.GRCh38.mt')
mt.describe()
print(mt.count())
|
9f80295c93a19443630e98437bbaf774c8de62d1
|
262af3a61864ba2aec01247075162a886551a439
|
/thingsboard_gateway/gateway/proto/__init__.py
|
8eba2ed7a01b3bb39c628af8000838ae5c95d64c
|
[
"Apache-2.0"
] |
permissive
|
thingsboard/thingsboard-gateway
|
1331e58013afd0872ce28120229237a886af4d0f
|
0e3996bb29c60a784d990fbcdf0fcbb69a4a82ae
|
refs/heads/master
| 2023-08-31T07:30:03.340388
| 2023-08-25T05:01:08
| 2023-08-25T05:01:08
| 78,083,065
| 1,484
| 836
|
Apache-2.0
| 2023-09-12T07:50:51
| 2017-01-05T05:41:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
__init__.py
|
# Copyright 2022. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use the following command to generate Python code from the proto file:
#$ python3 -m grpc_tools.protoc -Ithingsboard_gateway/gateway/proto --python_out=thingsboard_gateway/gateway/proto/ --grpc_python_out=thingsboard_gateway/gateway/proto/ thingsboard_gateway/gateway/proto/messages.proto
# Update file messages_pb2_grpc.py:
# Replace:
# import messages_pb2 as messages__pb2
# With:
# import thingsboard_gateway.gateway.proto.messages_pb2 as messages__pb2
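# A minimal sketch (not part of the gateway code) of automating the import fix
# described above; the file path is assumed to match the protoc output location
# used in the command above.
#
#     from pathlib import Path
#
#     grpc_file = Path("thingsboard_gateway/gateway/proto/messages_pb2_grpc.py")
#     text = grpc_file.read_text()
#     text = text.replace(
#         "import messages_pb2 as messages__pb2",
#         "import thingsboard_gateway.gateway.proto.messages_pb2 as messages__pb2")
#     grpc_file.write_text(text)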
|
2db1fb21acd011e4ddc3d4bda6e4aa196d37e80c
|
05fe579c12f0013ce83a106083ddb66ace5e8f47
|
/tests/st/func/datavisual/image/test_single_image_restful_api.py
|
ba0e2073fbc957f206449869e6fe598d0de37982
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] |
permissive
|
mindspore-ai/mindinsight
|
59d3f47144ada9a12d2c82d9826ad5f5288aed78
|
a774d893fb2f21dbc3edb5cd89f9e6eec274ebf1
|
refs/heads/master
| 2023-07-22T22:46:43.075617
| 2023-07-17T11:26:58
| 2023-07-17T11:26:58
| 250,692,948
| 224
| 24
|
Apache-2.0
| 2020-12-29T12:22:51
| 2020-03-28T01:58:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,757
|
py
|
test_single_image_restful_api.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Functions:
    Test the single-image restful api.
Usage:
pytest tests/st/func/datavisual
"""
import pytest
from mindinsight.datavisual.common.enums import PluginNameEnum
from .....utils.tools import get_image_tensor_from_bytes, get_url
from .. import globals as gbl
BASE_URL = '/v1/mindinsight/datavisual/image/single-image'
class TestSingleImage:
"""Test query single image."""
@pytest.mark.level0
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
def test_single_image(self, client):
"""Test getting single image."""
step = 1
plugin_name = PluginNameEnum.IMAGE.value
train_id = gbl.get_train_ids()[0]
tag_name = gbl.get_tags(train_id, plugin_name)[0]
expected_image_tensor = gbl.get_single_image(train_id, tag_name, step)
params = dict(train_id=train_id, tag=tag_name, step=step)
url = get_url(BASE_URL, params)
response = client.get(url)
recv_image_tensor = get_image_tensor_from_bytes(response.data)
assert expected_image_tensor.any() == recv_image_tensor.any()
@pytest.mark.level1
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
def test_single_image_no_train_id(self, client):
"""Test getting single image without train id."""
params = dict(tag="tag_name_0/image", step=1)
url = get_url(BASE_URL, params)
response = client.get(url)
assert response.status_code == 400
response = response.get_json()
assert response['error_code'] == '50540003'
assert response['error_msg'] == "Param missing. 'train_id' is required."
@pytest.mark.level1
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
def test_single_image_no_tag(self, client):
"""Test getting single image without tag."""
params = dict(train_id="./summary0", step=1)
url = get_url(BASE_URL, params)
response = client.get(url)
assert response.status_code == 400
response = response.get_json()
assert response['error_code'] == '50540003'
assert response['error_msg'] == "Param missing. 'tag' is required."
@pytest.mark.level1
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
def test_single_image_no_step(self, client):
"""Test getting single image without step."""
params = dict(train_id="./summary0", tag="tag_name_0/image")
url = get_url(BASE_URL, params)
response = client.get(url)
assert response.status_code == 400
response = response.get_json()
assert response['error_code'] == '50540003'
assert response['error_msg'] == "Param missing. 'step' is required."
@pytest.mark.level1
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
@pytest.mark.parametrize("train_id", ["@#$", "./summary_x", dict()])
def test_single_image_with_special_train_id(self, client, train_id):
"""Test passing train_id with special character, invalid value, and wrong type."""
params = dict(train_id=train_id, tag="tag_name_0/image", step=1)
url = get_url(BASE_URL, params)
response = client.get(url)
assert response.status_code == 400
response = response.get_json()
assert response['error_code'] == '50545005'
assert response['error_msg'] == "Train job is not exist. Detail: Can not find the given train job in cache."
@pytest.mark.level1
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
@pytest.mark.parametrize("tag", ["@#$", "tag_name_xxx/image", dict()])
def test_single_image_with_special_tag(self, client, tag):
"""Test passing tag with special character, invalid value, and wrong type."""
train_id = gbl.get_train_ids()[0]
params = dict(train_id=train_id, tag=tag, step=1)
url = get_url(BASE_URL, params)
response = client.get(url)
assert response.status_code == 400
response = response.get_json()
assert response['error_code'] == '5054500D'
assert response['error_msg'] == "Image is not exist. Detail: Invalid parameter value. " \
"Can not find any data in this train job by given tag."
@pytest.mark.level1
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
def test_single_image_with_invalid_step(self, client):
"""Test getting single image with invalid step."""
train_id = gbl.get_train_ids()[0]
params = dict(train_id=train_id, tag="tag_name_0/image", step=1000)
url = get_url(BASE_URL, params)
response = client.get(url)
assert response.status_code == 400
response = response.get_json()
assert response['error_code'] == '5054500D'
assert response['error_msg'] == "Image is not exist. Detail: " \
"Can not find the step with given train job id and tag."
@pytest.mark.level1
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
@pytest.mark.parametrize("step", ["@#$", dict()])
def test_single_image_with_special_step(self, client, step):
"""Test getting single image with special step."""
params = dict(train_id="./summary0", tag="tag_name_0/image", step=step)
url = get_url(BASE_URL, params)
response = client.get(url)
assert response.status_code == 400
response = response.get_json()
assert response['error_code'] == '50540001'
assert response['error_msg'] == "Invalid parameter type. 'step' expect Integer type."
|
d3b6206016bf5ae86fe21ad097fcb393162006dd
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stubs/openpyxl/openpyxl/drawing/properties.pyi
|
a7fdff46c6bc3ad01edfaa04d05cb7fe03cb787e
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 2,968
|
pyi
|
properties.pyi
|
from _typeshed import Incomplete
from openpyxl.descriptors.serialisable import Serialisable
class GroupShapeProperties(Serialisable):
tagname: str
bwMode: Incomplete
xfrm: Incomplete
scene3d: Incomplete
extLst: Incomplete
def __init__(
self,
bwMode: Incomplete | None = ...,
xfrm: Incomplete | None = ...,
scene3d: Incomplete | None = ...,
extLst: Incomplete | None = ...,
) -> None: ...
class GroupLocking(Serialisable):
tagname: str
namespace: Incomplete
noGrp: Incomplete
noUngrp: Incomplete
noSelect: Incomplete
noRot: Incomplete
noChangeAspect: Incomplete
noMove: Incomplete
noResize: Incomplete
noChangeArrowheads: Incomplete
noEditPoints: Incomplete
noAdjustHandles: Incomplete
noChangeShapeType: Incomplete
extLst: Incomplete
__elements__: Incomplete
def __init__(
self,
noGrp: Incomplete | None = ...,
noUngrp: Incomplete | None = ...,
noSelect: Incomplete | None = ...,
noRot: Incomplete | None = ...,
noChangeAspect: Incomplete | None = ...,
noChangeArrowheads: Incomplete | None = ...,
noMove: Incomplete | None = ...,
noResize: Incomplete | None = ...,
noEditPoints: Incomplete | None = ...,
noAdjustHandles: Incomplete | None = ...,
noChangeShapeType: Incomplete | None = ...,
extLst: Incomplete | None = ...,
) -> None: ...
class NonVisualGroupDrawingShapeProps(Serialisable):
tagname: str
grpSpLocks: Incomplete
extLst: Incomplete
__elements__: Incomplete
def __init__(self, grpSpLocks: Incomplete | None = ..., extLst: Incomplete | None = ...) -> None: ...
class NonVisualDrawingShapeProps(Serialisable):
tagname: str
spLocks: Incomplete
txBax: Incomplete
extLst: Incomplete
__elements__: Incomplete
txBox: Incomplete
def __init__(
self, spLocks: Incomplete | None = ..., txBox: Incomplete | None = ..., extLst: Incomplete | None = ...
) -> None: ...
class NonVisualDrawingProps(Serialisable):
tagname: str
id: Incomplete
name: Incomplete
descr: Incomplete
hidden: Incomplete
title: Incomplete
hlinkClick: Incomplete
hlinkHover: Incomplete
extLst: Incomplete
__elements__: Incomplete
def __init__(
self,
id: Incomplete | None = ...,
name: Incomplete | None = ...,
descr: Incomplete | None = ...,
hidden: Incomplete | None = ...,
title: Incomplete | None = ...,
hlinkClick: Incomplete | None = ...,
hlinkHover: Incomplete | None = ...,
extLst: Incomplete | None = ...,
) -> None: ...
class NonVisualGroupShape(Serialisable):
tagname: str
cNvPr: Incomplete
cNvGrpSpPr: Incomplete
__elements__: Incomplete
def __init__(self, cNvPr: Incomplete | None = ..., cNvGrpSpPr: Incomplete | None = ...) -> None: ...
|
2023f37718f8b558ec4e63ed41e908563dde05d8
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-foas/aliyunsdkfoas/request/v20181111/CreateCellClusterOrderRequest.py
|
0570e709cfc7a7f191c78382419674cd85b954a9
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
CreateCellClusterOrderRequest.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkfoas.endpoint import endpoint_data
class CreateCellClusterOrderRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'foas', '2018-11-11', 'CreateCellClusterOrder','foas')
self.set_protocol_type('https')
self.set_uri_pattern('/api/v2/realtime-compute/cell/buy')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_period(self):
return self.get_body_params().get('period')
def set_period(self,period):
self.add_body_params('period', period)
def get_slaveNum(self):
return self.get_body_params().get('slaveNum')
def set_slaveNum(self,slaveNum):
self.add_body_params('slaveNum', slaveNum)
def get_slaveSpec(self):
return self.get_body_params().get('slaveSpec')
def set_slaveSpec(self,slaveSpec):
self.add_body_params('slaveSpec', slaveSpec)
def get_region(self):
return self.get_body_params().get('region')
def set_region(self,region):
self.add_body_params('region', region)
def get_masterNum(self):
return self.get_body_params().get('masterNum')
def set_masterNum(self,masterNum):
self.add_body_params('masterNum', masterNum)
def get_masterSpec(self):
return self.get_body_params().get('masterSpec')
def set_masterSpec(self,masterSpec):
self.add_body_params('masterSpec', masterSpec)
def get_payModel(self):
return self.get_body_params().get('payModel')
def set_payModel(self,payModel):
self.add_body_params('payModel', payModel)
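# Illustrative usage sketch (assumed typical aliyun-python-sdk-core pattern,
# not taken from this file): populate the body parameters via the setters
# above, then send the request with an AcsClient.
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = CreateCellClusterOrderRequest()
#     request.set_region('cn-hangzhou')
#     request.set_masterNum(1)
#     request.set_slaveNum(2)
#     response = client.do_action_with_exception(request)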
|
f974e1f3e7d64927b93093124f46a66c9299f00f
|
cfa35dc2ea93ee0eceb2399a9e6112e987579c09
|
/stonesoup/initiator/tests/test_simple.py
|
6b8958006413904cd595896f4d34c49710cc6878
|
[
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011"
] |
permissive
|
dstl/Stone-Soup
|
227e6a9e6fbdceca14af3f0259f311ec74095597
|
f24090cc919b3b590b84f965a3884ed1293d181d
|
refs/heads/main
| 2023-09-01T14:33:14.626428
| 2023-09-01T11:35:46
| 2023-09-01T11:35:46
| 98,420,803
| 315
| 126
|
MIT
| 2023-09-14T14:55:34
| 2017-07-26T12:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 14,171
|
py
|
test_simple.py
|
import datetime
import numpy as np
import pytest
from pytest import approx
from ...models.base import LinearModel, ReversibleModel
from ...models.measurement.linear import LinearGaussian
from ...models.measurement.nonlinear import CartesianToBearingRange, Cartesian2DToBearing,\
CombinedReversibleGaussianMeasurementModel
from ...models.transition.linear import \
CombinedLinearGaussianTransitionModel, ConstantVelocity
from ...updater.kalman import KalmanUpdater, ExtendedKalmanUpdater
from ...predictor.kalman import KalmanPredictor
from ...deleter.time import UpdateTimeDeleter
from ...hypothesiser.distance import DistanceHypothesiser
from ...dataassociator.neighbour import NearestNeighbour
from ...measures import Mahalanobis
from ...types.detection import Detection, TrueDetection
from ...types.hypothesis import SingleHypothesis
from ...types.prediction import Prediction
from ...types.state import GaussianState
from ...types.update import ParticleStateUpdate, Update
from ..simple import (
SinglePointInitiator, SimpleMeasurementInitiator,
MultiMeasurementInitiator, GaussianParticleInitiator
)
@pytest.mark.parametrize(
'measurement_model',
[LinearGaussian(2, [0, 1], np.diag([1, 1])),
CartesianToBearingRange(2, [1, 0], np.diag([0.1, 1]))],
ids=['linear', 'non-linear'])
def test_spi(measurement_model):
"""Test SinglePointInitiator"""
# Prior state information
prior_state = GaussianState(
np.array([[0], [0]]),
np.array([[100, 0], [0, 1]]))
# Create the Kalman updater
if isinstance(measurement_model, LinearModel):
kup = KalmanUpdater(measurement_model)
else:
kup = ExtendedKalmanUpdater(measurement_model)
# Define the Initiator
initiator = SinglePointInitiator(
prior_state,
measurement_model)
# Define 2 detections from which tracks are to be initiated
timestamp = datetime.datetime.now()
detections = [Detection(np.array([[4.5], [2.0]]), timestamp),
Detection(np.array([[-4.5], [2.0]]), timestamp)]
# Run the initiator based on the available detections
tracks = initiator.initiate(detections, timestamp)
# Ensure same number of tracks are initiated as number of measurements
# (i.e. 2)
assert (len(tracks) == 2)
# Ensure that tracks are initiated correctly
evaluated_tracks = [False, False]
for detection in detections:
hypo = SingleHypothesis(prediction=prior_state, measurement=detection)
eval_track_state = kup.update(hypo)
# Compare against both tracks
for track_idx, track in enumerate(tracks):
if (np.array_equal(eval_track_state.mean, track.mean)
and np.array_equal(eval_track_state.covar, track.covar)):
evaluated_tracks[track_idx] = True
# Ensure both tracks have been evaluated
assert (all(evaluated_tracks))
assert set(detections) == set(track.state.hypothesis.measurement
for track in tracks)
def test_linear_measurement():
measurement_model = LinearGaussian(2, [0], np.array([[50]]))
measurement_initiator = SimpleMeasurementInitiator(
GaussianState(np.array([[0], [0]]), np.diag([100, 10])),
measurement_model
)
timestamp = datetime.datetime.now()
detections = [Detection(np.array([[5]]), timestamp),
Detection(np.array([[-5]]), timestamp)]
tracks = measurement_initiator.initiate(detections, timestamp)
for track in tracks:
if track.state_vector[0, 0] > 0:
assert np.array_equal(track.state_vector, np.array([[5], [0]]))
assert np.array_equal(
measurement_model.matrix() @ track.state_vector,
detections[0].state_vector)
assert track.state.hypothesis.measurement is detections[0]
else:
assert np.array_equal(track.state_vector, np.array([[-5], [0]]))
assert np.array_equal(
measurement_model.matrix() @ track.state_vector,
detections[1].state_vector)
assert track.state.hypothesis.measurement is detections[1]
assert track.timestamp == timestamp
assert np.diag([50, 10]) == approx(track.covar)
assert measurement_model.matrix() @ track.covar @ \
measurement_model.matrix().T == approx(measurement_model.covar())
@pytest.mark.parametrize("meas_model", (CartesianToBearingRange, Cartesian2DToBearing))
@pytest.mark.parametrize("skip_non_linear", (None, True, False))
def test_nonlinear_measurement(meas_model, skip_non_linear):
meas_params = [2, [0, 1], np.diag([np.radians(2), 30])]
measurement_model = meas_model(*meas_params)
combined_measurement_model = CombinedReversibleGaussianMeasurementModel([measurement_model])
measurement_initiator = SimpleMeasurementInitiator(
prior_state=GaussianState(np.array([[0], [0]]), np.diag([100, 10])),
measurement_model=measurement_model,
skip_non_reversible=skip_non_linear)
combined_measurement_initiator = SimpleMeasurementInitiator(
prior_state=GaussianState(np.array([[0], [0]]), np.diag([100, 10])),
measurement_model=combined_measurement_model,
skip_non_reversible=skip_non_linear)
timestamp = datetime.datetime.now()
detections = [Detection(np.array([[5, 2]]), timestamp),
Detection(np.array([[-5, -2]]), timestamp)]
if not (isinstance(measurement_model, ReversibleModel) or skip_non_linear):
with pytest.raises(Exception):
# Non-reversible and not skipping
measurement_initiator.initiate(detections, timestamp)
with pytest.raises(NotImplementedError):
# Reversible but not implemented
combined_measurement_initiator.initiate(detections, timestamp)
elif not isinstance(measurement_model, ReversibleModel) and skip_non_linear:
# Skipping for non-reversible
assert len(measurement_initiator.initiate(detections, timestamp)) == 0
assert len(combined_measurement_initiator.initiate(detections, timestamp)) == 0
else:
# Otherwise tracks made
all_tracks = [measurement_initiator.initiate(detections, timestamp),
combined_measurement_initiator.initiate(detections, timestamp)]
for tracks in all_tracks:
assert len(tracks) == 2
for track in tracks:
assert track.timestamp == timestamp
jac = measurement_model.jacobian(track.state)
Ry = jac @ track.covar @ jac.T
assert Ry == approx(measurement_model.covar())
def test_linear_measurement_non_direct():
class _LinearMeasurementModel(LinearModel):
ndim_state = 2
        ndim_meas = 2
mapping = (0, 1)
def matrix(self):
return np.array([[0, 1], [2, 0]])
@staticmethod
def covar():
return np.diag([10, 50])
def ndim(self):
pass
def pdf(self):
pass
        def rvs(self):
pass
measurement_model = _LinearMeasurementModel()
measurement_initiator = SimpleMeasurementInitiator(
GaussianState(np.array([[0], [0]]), np.diag([100, 10])),
measurement_model
)
timestamp = datetime.datetime.now()
detections = [Detection(np.array([[5], [2]]), timestamp),
Detection(np.array([[-5], [8]]), timestamp)]
tracks = measurement_initiator.initiate(detections, timestamp)
for track in tracks:
if track.state_vector[1, 0] > 0:
assert np.array_equal(track.state_vector, np.array([[1], [5]]))
assert np.array_equal(
measurement_model.matrix() @ track.state_vector,
detections[0].state_vector)
assert track.state.hypothesis.measurement is detections[0]
else:
assert np.array_equal(track.state_vector, np.array([[4], [-5]]))
assert np.array_equal(
measurement_model.matrix() @ track.state_vector,
detections[1].state_vector)
assert track.state.hypothesis.measurement is detections[1]
assert track.timestamp == timestamp
assert np.diag([12.5, 10]) == approx(track.covar)
assert measurement_model.matrix() @ track.covar @ \
measurement_model.matrix().T == approx(measurement_model.covar())
def test_linear_measurement_extra_state_dim():
class _LinearMeasurementModel(LinearModel):
ndim_state = 3
        ndim_meas = 2
mapping = (0, 2)
def matrix(self):
return np.array([[1, 0, 0], [0, 0, 1]])
@staticmethod
def covar():
return np.diag([10, 50])
def ndim(self):
pass
def pdf(self):
pass
def rvs(self):
pass
measurement_model = _LinearMeasurementModel()
measurement_initiator = SimpleMeasurementInitiator(
GaussianState(np.array([[0], [0], [0]]), np.diag([100, 10, 500])),
measurement_model
)
timestamp = datetime.datetime.now()
detections = [Detection(np.array([[5], [2]]), timestamp),
Detection(np.array([[-5], [8]]), timestamp)]
tracks = measurement_initiator.initiate(detections, timestamp)
for track in tracks:
if track.state_vector[0, 0] > 0:
assert np.array_equal(
track.state_vector,
np.array([[5], [0], [2]]))
assert np.array_equal(
measurement_model.matrix() @ track.state_vector,
detections[0].state_vector)
assert track.state.hypothesis.measurement is detections[0]
else:
assert np.array_equal(
track.state_vector,
np.array([[-5], [0], [8]]))
assert np.array_equal(
measurement_model.matrix() @ track.state_vector,
detections[1].state_vector)
assert track.state.hypothesis.measurement is detections[1]
assert track.timestamp == timestamp
assert np.diag([10, 10, 50]) == approx(track.covar)
assert measurement_model.matrix() @ track.covar @ \
measurement_model.matrix().T == approx(measurement_model.covar())
@pytest.mark.parametrize('updates_only', [False, True])
def test_multi_measurement(updates_only):
transition_model = CombinedLinearGaussianTransitionModel(
(ConstantVelocity(0.05), ConstantVelocity(0.05)))
measurement_model = LinearGaussian(
ndim_state=4, mapping=[0, 2], noise_covar=np.diag([10, 10]))
predictor = KalmanPredictor(transition_model)
updater = KalmanUpdater(measurement_model)
hypothesiser = DistanceHypothesiser(
predictor, updater, measure=Mahalanobis())
data_associator = NearestNeighbour(hypothesiser)
deleter = UpdateTimeDeleter(datetime.timedelta(seconds=59))
measurement_initiator = MultiMeasurementInitiator(
GaussianState([[0], [0], [0], [0]], np.diag([0, 15, 0, 15])),
deleter, data_associator, updater,
measurement_model=measurement_model, updates_only=updates_only)
timestamp = datetime.datetime.now()
first_detections = {Detection(np.array([[5], [2]]), timestamp),
Detection(np.array([[-5], [-2]]), timestamp)}
first_tracks = measurement_initiator.initiate(first_detections, timestamp)
assert len(first_tracks) == 0
assert len(measurement_initiator.holding_tracks) == 2
timestamp = datetime.datetime.now() + datetime.timedelta(seconds=60)
second_detections = {Detection(np.array([[5], [3]]), timestamp)}
second_tracks = measurement_initiator.initiate(second_detections, timestamp)
if updates_only:
assert len(second_tracks) == 1
else:
assert len(second_tracks) == 2
assert any(isinstance(track.state, Prediction) for track in second_tracks)
assert any(isinstance(track.state, Update) for track in second_tracks)
assert len(measurement_initiator.holding_tracks) == 0
@pytest.mark.parametrize("initiator", [
SinglePointInitiator(
GaussianState(np.array([[0]]), np.array([[100]]))
),
SimpleMeasurementInitiator(
GaussianState(np.array([[0]]), np.array([[100]]))
),
], ids=['SinglePoint', 'LinearMeasurement'])
def test_measurement_model(initiator):
timestamp = datetime.datetime.now()
dummy_detection = TrueDetection(np.array([0, 0]), timestamp)
# The SinglePointInitiator will raise an error when the ExtendedKalmanUpdater
# is called and neither the detection nor the initiator has a measurement
# model. The SimpleMeasurementInitiator will raise an error in the if/else
# blocks.
with pytest.raises(ValueError):
_ = initiator.initiate({dummy_detection}, timestamp)
@pytest.mark.parametrize("gaussian_initiator", [
SinglePointInitiator(
GaussianState(np.array([[0]]), np.array([[100]])),
LinearGaussian(1, [0], np.array([[1]]))
),
SimpleMeasurementInitiator(
GaussianState(np.array([[0]]), np.array([[100]])),
LinearGaussian(1, [0], np.array([[1]]))
),
], ids=['SinglePoint', 'LinearMeasurement'])
def test_gaussian_particle(gaussian_initiator):
particle_initiator = GaussianParticleInitiator(gaussian_initiator)
timestamp = datetime.datetime.now()
detections = [Detection(np.array([[5]]), timestamp),
Detection(np.array([[-5]]), timestamp)]
tracks = particle_initiator.initiate(detections, timestamp)
for track in tracks:
assert isinstance(track.state, ParticleStateUpdate)
if track.state.mean > 0:
assert np.allclose(track.state.mean, np.array([[5]]), atol=0.4)
assert track.state.hypothesis.measurement is detections[0]
else:
assert np.allclose(track.state.mean, np.array([[-5]]), atol=0.4)
assert track.state.hypothesis.measurement is detections[1]
assert track.timestamp == timestamp
assert np.allclose(track.covar, np.array([[1]]), atol=0.4)
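# Summary sketch of the initiator interface exercised above (illustrative only,
# mirroring the calls in these tests): every initiator takes detections plus a
# timestamp and returns a set of new tracks.
#
#     initiator = SimpleMeasurementInitiator(
#         GaussianState(np.array([[0], [0]]), np.diag([100, 10])),
#         LinearGaussian(2, [0], np.array([[50]])))
#     tracks = initiator.initiate(detections, timestamp)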
|
9c6c0ffb1848f11f84f8cc30b83e288f7dc69d30
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-core/huaweicloudsdkcore/auth/credentials.py
|
55d5e86b1113db7ad81a2973a3b185e5befd7f47
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 13,900
|
py
|
credentials.py
|
# coding: utf-8
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache LICENSE, Version 2.0 (the
"LICENSE"); you may not use this file except in compliance
with the LICENSE. You may obtain a copy of the LICENSE at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the LICENSE is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the LICENSE for the
specific language governing permissions and limitations
under the LICENSE.
"""
import re
import os
from abc import abstractmethod
from huaweicloudsdkcore.auth.internal import Iam, Metadata
from huaweicloudsdkcore.exceptions.exceptions import ApiValueError, ServiceResponseException, SdkException
from huaweicloudsdkcore.signer.signer import Signer, SM3Signer, DerivationAKSKSigner, P256SHA256Signer, SM2SM3Signer
from huaweicloudsdkcore.signer.algorithm import SigningAlgorithm
from huaweicloudsdkcore.auth.cache import AuthCache
from huaweicloudsdkcore.utils import time_utils, six_utils
class DerivedCredentials(six_utils.get_abstract_meta_class()):
_DEFAULT_ENDPOINT_REG = "^[a-z][a-z0-9-]+(\\.[a-z]{2,}-[a-z]+-\\d{1,2})?\\.(my)?(huaweicloud|myhwclouds).(com|cn)"
@abstractmethod
def _process_derived_auth_params(self, derived_auth_service_name, region_id):
pass
@abstractmethod
def with_derived_predicate(self, derived_predicate):
pass
@abstractmethod
def _is_derived_auth(self, request):
pass
@classmethod
def get_default_derived_predicate(cls):
return lambda request: False if re.match(cls._DEFAULT_ENDPOINT_REG, request.host) else True
class TempCredentials(six_utils.get_abstract_meta_class()):
@abstractmethod
def _need_update_security_token(self):
pass
@abstractmethod
def update_security_token_from_metadata(self):
pass
class FederalCredentials(six_utils.get_abstract_meta_class()):
@abstractmethod
def _need_update_auth_token(self):
pass
@abstractmethod
def _update_auth_token_by_id_token(self, http_client):
pass
class Credentials(DerivedCredentials, TempCredentials, FederalCredentials):
_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
_X_SECURITY_TOKEN = "X-Security-Token"
_X_AUTH_TOKEN = "X-Auth-Token"
_SIGNER_CACHE = {}
_SIGNER_CASE = {
SigningAlgorithm.HMAC_SHA256: Signer,
SigningAlgorithm.HMAC_SM3: SM3Signer,
SigningAlgorithm.ECDSA_P256_SHA256: P256SHA256Signer,
SigningAlgorithm.SM2_SM3: SM2SM3Signer
}
def __init__(self, ak=None, sk=None):
super(Credentials, self).__init__()
self.ak = ak
self.sk = sk
self.idp_id = None
self.id_token_file = None
self.iam_endpoint = None
self.security_token = None
self._derived_auth_service_name = None
self._derived_predicate = None
self._expired_at = None
self._auth_token = None
self._region_id = None
def with_ak(self, ak):
self.ak = ak
return self
def with_sk(self, sk):
self.sk = sk
return self
def with_idp_id(self, idp_id):
self.idp_id = idp_id
return self
def with_id_token_file(self, id_token_file):
self.id_token_file = id_token_file
return self
def with_iam_endpoint(self, endpoint):
self.iam_endpoint = endpoint
return self
def with_security_token(self, token):
self.security_token = token
return self
def get_update_path_params(self):
pass
def process_auth_params(self, http_client, region_id):
pass
def process_auth_request(self, request, http_client):
if self._need_update_auth_token():
self._update_auth_token_by_id_token(http_client)
elif self._need_update_security_token():
self.update_security_token_from_metadata()
return http_client.executor.submit(self.sign_request, request)
def sign_request(self, request):
if self._auth_token:
request.header_params[self._X_AUTH_TOKEN] = self._auth_token
Signer.process_request_uri(request)
return request
if self.security_token is not None:
request.header_params["X-Security-Token"] = self.security_token
if "Content-Type" in request.header_params and not request.header_params["Content-Type"].startswith(
"application/json"):
request.header_params["X-Sdk-Content-Sha256"] = "UNSIGNED-PAYLOAD"
if self._is_derived_auth(request):
return DerivationAKSKSigner(self).sign(request, self._derived_auth_service_name, self._region_id)
signer_key = str(request.signing_algorithm) + self.ak
if signer_key in self._SIGNER_CACHE:
signer = self._SIGNER_CACHE.get(signer_key)
else:
signer_cls = self._SIGNER_CASE.get(request.signing_algorithm)
if not signer_cls:
raise SdkException("unsupported signing algorithm: " + str(request.signing_algorithm))
signer = signer_cls(self)
self._SIGNER_CACHE[signer_key] = signer
return signer.sign(request)
def _is_derived_auth(self, request):
if not self._derived_predicate:
return False
return self._derived_predicate(request)
def with_derived_predicate(self, derived_predicate):
self._derived_predicate = derived_predicate
return self
def _process_derived_auth_params(self, derived_auth_service_name, region_id):
pass
def _need_update_security_token(self):
if not self._expired_at or not self.security_token:
return False
return self._expired_at - time_utils.get_timestamp_utc() < 60
def update_security_token_from_metadata(self):
credential = Metadata.get_credential_from_metadata()
self.ak = credential.get("access")
self.sk = credential.get("secret")
self.security_token = credential.get("securitytoken")
self._expired_at = time_utils.get_timestamp_from_str(credential.get("expires_at"), self._TIME_FORMAT)
def _need_update_auth_token(self):
if not self.idp_id or not self.id_token_file:
return False
if not self._auth_token or not self._expired_at:
return True
return self._expired_at - time_utils.get_timestamp_utc() < 60
def _update_auth_token_by_id_token(self, http_client):
pass
def _get_id_token(self):
if not os.path.exists(self.id_token_file):
raise ApiValueError("id_token_file '{}' does not exist".format(self.id_token_file))
with open(self.id_token_file, "r") as f:
id_token = f.read()
if not id_token:
raise ApiValueError("id_token is empty in id_token_file '{}'".format(self.id_token_file))
return id_token
class BasicCredentials(Credentials):
_X_PROJECT_ID = "X-Project-Id"
def __init__(self, ak=None, sk=None, project_id=None):
"""For regional services' authentication
:param ak: The access key ID for your account
:param sk: The secret access key for your account
:param project_id: The ID of your project depending on your region which you want to operate
"""
super(BasicCredentials, self).__init__(ak, sk)
self.project_id = project_id
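    # Illustrative usage sketch (assumed typical pattern, not taken from this
    # file): credentials for a regional service can be built directly or with
    # the fluent with_* helpers defined on Credentials above, e.g.
    #
    #     credentials = BasicCredentials(ak="<ak>", sk="<sk>", project_id="<project id>")
    #     # or, equivalently
    #     credentials = (BasicCredentials()
    #                    .with_ak("<ak>").with_sk("<sk>").with_project_id("<project id>"))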
def with_project_id(self, project_id):
self.project_id = project_id
return self
def get_update_path_params(self):
path_params = {}
if self.project_id:
path_params["project_id"] = self.project_id
return path_params
def _process_derived_auth_params(self, derived_auth_service_name, region_id):
if not self._derived_auth_service_name:
self._derived_auth_service_name = derived_auth_service_name
if not self._region_id:
self._region_id = region_id
def process_auth_params(self, http_client, region_id):
if self.idp_id or self.id_token_file:
if not self.idp_id:
raise ApiValueError("idp_id is required when using idp_id & id_token_file")
elif not self.id_token_file:
raise ApiValueError("id_token_file is required when using idp_id & id_token_file")
if not self.project_id:
raise ApiValueError("project_id is required when using idp_id & id_token_file")
if self.project_id:
return self
ak_with_name = self.ak + region_id
project_id = AuthCache.get_auth(ak_with_name)
if project_id:
self.project_id = project_id
return self
derived_predicate = self._derived_predicate
self._derived_predicate = None
if self.iam_endpoint is None:
self.iam_endpoint = Iam.get_iam_endpoint()
req = Iam.get_keystone_list_projects_request(http_client.config, self.iam_endpoint, region_id=region_id)
future_request = self.process_auth_request(req, http_client)
request = future_request.result()
try:
self.project_id = Iam.keystone_list_projects(http_client, request)
AuthCache.put_auth(ak_with_name, self.project_id)
except ServiceResponseException as e:
err_msg = e.error_msg if hasattr(e, "error_msg") else "unknown exception."
raise ApiValueError("Failed to get project id, " + err_msg)
self._derived_predicate = derived_predicate
return self
def sign_request(self, request):
if self.project_id:
request.header_params[self._X_PROJECT_ID] = self.project_id
return super(BasicCredentials, self).sign_request(request)
def _update_auth_token_by_id_token(self, http_client):
iam_endpoint = self.iam_endpoint if self.iam_endpoint else Iam.get_iam_endpoint()
request = Iam.get_create_token_by_id_token_request(http_client.config, iam_endpoint, self.idp_id,
self._get_id_token(), project_id=self.project_id)
token, expired_str = Iam.create_token_by_id_token(http_client, request)
self._expired_at = time_utils.get_timestamp_from_str(expired_str, self._TIME_FORMAT)
self._auth_token = token
class GlobalCredentials(Credentials):
_X_DOMAIN_ID = "X-Domain-Id"
def __init__(self, ak=None, sk=None, domain_id=None):
"""For global services' authentication
:param ak: The access key ID for your account
:param sk: The secret access key for your account
:param domain_id: The account ID of Huawei Cloud
"""
super(GlobalCredentials, self).__init__(ak, sk)
self.domain_id = domain_id
def with_domain_id(self, domain_id):
self.domain_id = domain_id
return self
def get_update_path_params(self):
path_params = {}
if self.domain_id:
path_params["domain_id"] = self.domain_id
return path_params
def process_auth_params(self, http_client, region_id):
if self.idp_id or self.id_token_file:
if not self.idp_id:
raise ApiValueError("idp_id is required when using idp_id & id_token_file")
elif not self.id_token_file:
raise ApiValueError("id_token_file is required when using idp_id & id_token_file")
if not self.domain_id:
raise ApiValueError("domain_id is required when using idp_id & id_token_file")
if self.domain_id:
return self
domain_id = AuthCache.get_auth(self.ak)
if domain_id:
self.domain_id = domain_id
return self
derived_predicate = self._derived_predicate
self._derived_predicate = None
if self.iam_endpoint is None:
self.iam_endpoint = Iam.get_iam_endpoint()
req = Iam.get_keystone_list_auth_domains_request(http_client.config, self.iam_endpoint)
future_request = self.process_auth_request(req, http_client)
request = future_request.result()
try:
self.domain_id = Iam.keystone_list_auth_domains(http_client, request)
AuthCache.put_auth(self.ak, self.domain_id)
except ServiceResponseException as e:
err_msg = e.error_msg if hasattr(e, "error_msg") else "unknown exception."
raise ApiValueError("Failed to get domain id, " + err_msg)
self._derived_predicate = derived_predicate
return self
def sign_request(self, request):
if self.domain_id:
request.header_params[self._X_DOMAIN_ID] = self.domain_id
return super(GlobalCredentials, self).sign_request(request)
def _process_derived_auth_params(self, derived_auth_service_name, region_id):
if not self._derived_auth_service_name:
self._derived_auth_service_name = derived_auth_service_name
if not self._region_id:
self._region_id = "globe"
def _update_auth_token_by_id_token(self, http_client):
iam_endpoint = self.iam_endpoint if self.iam_endpoint else Iam.get_iam_endpoint()
request = Iam.get_create_token_by_id_token_request(http_client.config, iam_endpoint, self.idp_id,
self._get_id_token(), domain_id=self.domain_id)
token, expired_str = Iam.create_token_by_id_token(http_client, request)
self._expired_at = time_utils.get_timestamp_from_str(expired_str, self._TIME_FORMAT)
self._auth_token = token
|
46e1eaf41b2a27b0f8c3174a9796e55cf3849b3a
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/tsa/statespace/kalman_filter.py
|
c9981bab92324a4ca2f274a3ecd0214eac5b3aac
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 106,207
|
py
|
kalman_filter.py
|
"""
State Space Representation and Kalman Filter
Author: Chad Fulton
License: Simplified-BSD
"""
import contextlib
from warnings import warn
import numpy as np
from .representation import OptionWrapper, Representation, FrozenRepresentation
from .tools import reorder_missing_matrix, reorder_missing_vector
from . import tools
from statsmodels.tools.sm_exceptions import ValueWarning
# Define constants
FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4
FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6
FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7
FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3
FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4
FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5
FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2
FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3
FILTER_CONCENTRATED = 0x100 # Harvey (1989), Chapter 3.4
FILTER_CHANDRASEKHAR = 0x200 # Herbst (2015)
INVERT_UNIVARIATE = 0x01
SOLVE_LU = 0x02
INVERT_LU = 0x04
SOLVE_CHOLESKY = 0x08
INVERT_CHOLESKY = 0x10
STABILITY_FORCE_SYMMETRY = 0x01
MEMORY_STORE_ALL = 0
MEMORY_NO_FORECAST_MEAN = 0x01
MEMORY_NO_FORECAST_COV = 0x02
MEMORY_NO_FORECAST = MEMORY_NO_FORECAST_MEAN | MEMORY_NO_FORECAST_COV
MEMORY_NO_PREDICTED_MEAN = 0x04
MEMORY_NO_PREDICTED_COV = 0x08
MEMORY_NO_PREDICTED = MEMORY_NO_PREDICTED_MEAN | MEMORY_NO_PREDICTED_COV
MEMORY_NO_FILTERED_MEAN = 0x10
MEMORY_NO_FILTERED_COV = 0x20
MEMORY_NO_FILTERED = MEMORY_NO_FILTERED_MEAN | MEMORY_NO_FILTERED_COV
MEMORY_NO_LIKELIHOOD = 0x40
MEMORY_NO_GAIN = 0x80
MEMORY_NO_SMOOTHING = 0x100
MEMORY_NO_STD_FORECAST = 0x200
MEMORY_CONSERVE = (
MEMORY_NO_FORECAST_COV | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |
MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING
)
TIMING_INIT_PREDICTED = 0
TIMING_INIT_FILTERED = 1
class KalmanFilter(Representation):
r"""
State space representation of a time series process, with Kalman filter
Parameters
----------
k_endog : {array_like, int}
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
describing the shocks in the transition equation. Must be less than
or equal to `k_states`. Default is `k_states`.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
results_class : class, optional
Default results class to use to save filtering output. Default is
`FilterResults`. If specified, class must extend from `FilterResults`.
**kwargs
Keyword arguments may be used to provide values for the filter,
inversion, and stability methods. See `set_filter_method`,
`set_inversion_method`, and `set_stability_method`.
Keyword arguments may be used to provide default values for state space
matrices. See `Representation` for more details.
See Also
--------
FilterResults
statsmodels.tsa.statespace.representation.Representation
Notes
-----
There are several types of options available for controlling the Kalman
filter operation. All options are internally held as bitmasks, but can be
manipulated by setting class attributes, which act like boolean flags. For
more information, see the `set_*` class method documentation. The options
are:
filter_method
The filtering method controls aspects of which
Kalman filtering approach will be used.
inversion_method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
stability_method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
conserve_memory
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
filter_timing
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
The `filter_method` and `inversion_method` options intentionally allow
the possibility that multiple methods will be indicated. In the case that
multiple methods are selected, the underlying Kalman filter will attempt to
    select the optimal method given the input data.
For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
indicated (this is in fact the default case). In this case, if the
endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
is used and inversion reduces to simple division, and if it has a larger
dimension, the Cholesky decomposition along with linear solving (rather
than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
set, then the Cholesky decomposition method would *always* be used, even in
the case of 1-dimensional data.
"""
filter_methods = [
'filter_conventional', 'filter_exact_initial', 'filter_augmented',
'filter_square_root', 'filter_univariate', 'filter_collapsed',
'filter_extended', 'filter_unscented', 'filter_concentrated',
'filter_chandrasekhar'
]
filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
"""
(bool) Flag for conventional Kalman filtering.
"""
filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL)
"""
(bool) Flag for exact initial Kalman filtering. Not implemented.
"""
filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED)
"""
(bool) Flag for augmented Kalman filtering. Not implemented.
"""
filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT)
"""
(bool) Flag for square-root Kalman filtering. Not implemented.
"""
filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE)
"""
(bool) Flag for univariate filtering of multivariate observation vector.
"""
filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED)
"""
(bool) Flag for Kalman filtering with collapsed observation vector.
"""
filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED)
"""
(bool) Flag for extended Kalman filtering. Not implemented.
"""
filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED)
"""
(bool) Flag for unscented Kalman filtering. Not implemented.
"""
filter_concentrated = OptionWrapper('filter_method', FILTER_CONCENTRATED)
"""
(bool) Flag for Kalman filtering with concentrated log-likelihood.
"""
filter_chandrasekhar = OptionWrapper('filter_method', FILTER_CHANDRASEKHAR)
"""
(bool) Flag for filtering with Chandrasekhar recursions.
"""
inversion_methods = [
'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
'invert_cholesky'
]
invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
"""
(bool) Flag for univariate inversion method (recommended).
"""
solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
"""
(bool) Flag for LU and linear solver inversion method.
"""
invert_lu = OptionWrapper('inversion_method', INVERT_LU)
"""
(bool) Flag for LU inversion method.
"""
solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
"""
(bool) Flag for Cholesky and linear solver inversion method (recommended).
"""
invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
"""
(bool) Flag for Cholesky inversion method.
"""
stability_methods = ['stability_force_symmetry']
stability_force_symmetry = (
OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY)
)
"""
(bool) Flag for enforcing covariance matrix symmetry
"""
memory_options = [
'memory_store_all', 'memory_no_forecast_mean',
'memory_no_forecast_cov', 'memory_no_forecast',
'memory_no_predicted_mean', 'memory_no_predicted_cov',
'memory_no_predicted', 'memory_no_filtered_mean',
'memory_no_filtered_cov', 'memory_no_filtered',
'memory_no_likelihood', 'memory_no_gain',
'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve'
]
memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL)
"""
(bool) Flag for storing all intermediate results in memory (default).
"""
memory_no_forecast_mean = OptionWrapper(
'conserve_memory', MEMORY_NO_FORECAST_MEAN)
"""
(bool) Flag to prevent storing forecasts and forecast errors.
"""
memory_no_forecast_cov = OptionWrapper(
'conserve_memory', MEMORY_NO_FORECAST_COV)
"""
(bool) Flag to prevent storing forecast error covariance matrices.
"""
@property
def memory_no_forecast(self):
"""
(bool) Flag to prevent storing all forecast-related output.
"""
return self.memory_no_forecast_mean or self.memory_no_forecast_cov
@memory_no_forecast.setter
def memory_no_forecast(self, value):
if bool(value):
self.memory_no_forecast_mean = True
self.memory_no_forecast_cov = True
else:
self.memory_no_forecast_mean = False
self.memory_no_forecast_cov = False
memory_no_predicted_mean = OptionWrapper(
'conserve_memory', MEMORY_NO_PREDICTED_MEAN)
"""
(bool) Flag to prevent storing predicted states.
"""
memory_no_predicted_cov = OptionWrapper(
'conserve_memory', MEMORY_NO_PREDICTED_COV)
"""
(bool) Flag to prevent storing predicted state covariance matrices.
"""
@property
def memory_no_predicted(self):
"""
(bool) Flag to prevent storing predicted state and covariance matrices.
"""
return self.memory_no_predicted_mean or self.memory_no_predicted_cov
@memory_no_predicted.setter
def memory_no_predicted(self, value):
if bool(value):
self.memory_no_predicted_mean = True
self.memory_no_predicted_cov = True
else:
self.memory_no_predicted_mean = False
self.memory_no_predicted_cov = False
memory_no_filtered_mean = OptionWrapper(
'conserve_memory', MEMORY_NO_FILTERED_MEAN)
"""
(bool) Flag to prevent storing filtered states.
"""
memory_no_filtered_cov = OptionWrapper(
'conserve_memory', MEMORY_NO_FILTERED_COV)
"""
(bool) Flag to prevent storing filtered state covariance matrices.
"""
@property
def memory_no_filtered(self):
"""
(bool) Flag to prevent storing filtered state and covariance matrices.
"""
return self.memory_no_filtered_mean or self.memory_no_filtered_cov
@memory_no_filtered.setter
def memory_no_filtered(self, value):
if bool(value):
self.memory_no_filtered_mean = True
self.memory_no_filtered_cov = True
else:
self.memory_no_filtered_mean = False
self.memory_no_filtered_cov = False
memory_no_likelihood = (
OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD)
)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN)
"""
(bool) Flag to prevent storing the Kalman gain matrices.
"""
memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING)
"""
    (bool) Flag to prevent storing intermediate quantities required for smoothing.
"""
memory_no_std_forecast = (
OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST))
"""
(bool) Flag to prevent storing standardized forecast errors.
"""
memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE)
"""
(bool) Flag to conserve the maximum amount of memory.
"""
timing_options = [
'timing_init_predicted', 'timing_init_filtered'
]
timing_init_predicted = OptionWrapper('filter_timing',
TIMING_INIT_PREDICTED)
"""
(bool) Flag for the default timing convention (Durbin and Koopman, 2012).
"""
timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED)
"""
    (bool) Flag for the alternate timing convention (Kim and Nelson, 1999).
"""
# Default filter options
filter_method = FILTER_CONVENTIONAL
"""
(int) Filtering method bitmask.
"""
inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY
"""
(int) Inversion method bitmask.
"""
stability_method = STABILITY_FORCE_SYMMETRY
"""
(int) Stability method bitmask.
"""
conserve_memory = MEMORY_STORE_ALL
"""
(int) Memory conservation bitmask.
"""
filter_timing = TIMING_INIT_PREDICTED
"""
(int) Filter timing.
"""
def __init__(self, k_endog, k_states, k_posdef=None,
loglikelihood_burn=0, tolerance=1e-19, results_class=None,
kalman_filter_classes=None, **kwargs):
# Extract keyword arguments to-be-used later
keys = ['filter_method'] + KalmanFilter.filter_methods
filter_method_kwargs = {key: kwargs.pop(key) for key in keys
if key in kwargs}
keys = ['inversion_method'] + KalmanFilter.inversion_methods
inversion_method_kwargs = {key: kwargs.pop(key) for key in keys
if key in kwargs}
keys = ['stability_method'] + KalmanFilter.stability_methods
stability_method_kwargs = {key: kwargs.pop(key) for key in keys
if key in kwargs}
keys = ['conserve_memory'] + KalmanFilter.memory_options
conserve_memory_kwargs = {key: kwargs.pop(key) for key in keys
if key in kwargs}
keys = ['alternate_timing'] + KalmanFilter.timing_options
filter_timing_kwargs = {key: kwargs.pop(key) for key in keys
if key in kwargs}
# Initialize the base class
super(KalmanFilter, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
# Setup the underlying Kalman filter storage
self._kalman_filters = {}
# Filter options
self.loglikelihood_burn = loglikelihood_burn
self.results_class = (
results_class if results_class is not None else FilterResults
)
# Options
self.prefix_kalman_filter_map = (
kalman_filter_classes
if kalman_filter_classes is not None
else tools.prefix_kalman_filter_map.copy())
self.set_filter_method(**filter_method_kwargs)
self.set_inversion_method(**inversion_method_kwargs)
self.set_stability_method(**stability_method_kwargs)
self.set_conserve_memory(**conserve_memory_kwargs)
self.set_filter_timing(**filter_timing_kwargs)
self.tolerance = tolerance
# Internal flags
# The _scale internal flag is used because we may want to
# use a fixed scale, in which case we want the flag to the Cython
# Kalman filter to indicate that the scale should not be concentrated
# out, so that self.filter_concentrated = False, but we still want to
# alert the results object that we are viewing the model as one in
# which the scale had been concentrated out for e.g. degree of freedom
# computations.
# This value should always be None, except within the fixed_scale
# context, and should not be modified by users or anywhere else.
self._scale = None
def _clone_kwargs(self, endog, **kwargs):
# See Representation._clone_kwargs for docstring
kwargs = super(KalmanFilter, self)._clone_kwargs(endog, **kwargs)
# Get defaults for options
kwargs.setdefault('filter_method', self.filter_method)
kwargs.setdefault('inversion_method', self.inversion_method)
kwargs.setdefault('stability_method', self.stability_method)
kwargs.setdefault('conserve_memory', self.conserve_memory)
kwargs.setdefault('alternate_timing', bool(self.filter_timing))
kwargs.setdefault('tolerance', self.tolerance)
kwargs.setdefault('loglikelihood_burn', self.loglikelihood_burn)
return kwargs
@property
def _kalman_filter(self):
prefix = self.prefix
if prefix in self._kalman_filters:
return self._kalman_filters[prefix]
return None
def _initialize_filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
tolerance=None, filter_timing=None,
loglikelihood_burn=None):
if filter_method is None:
filter_method = self.filter_method
if inversion_method is None:
inversion_method = self.inversion_method
if stability_method is None:
stability_method = self.stability_method
if conserve_memory is None:
conserve_memory = self.conserve_memory
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
if filter_timing is None:
filter_timing = self.filter_timing
if tolerance is None:
tolerance = self.tolerance
# Make sure we have endog
if self.endog is None:
raise RuntimeError('Must bind a dataset to the model before'
' filtering or smoothing.')
# Initialize the representation matrices
prefix, dtype, create_statespace = self._initialize_representation()
# Determine if we need to (re-)create the filter
# (definitely need to recreate if we recreated the _statespace object)
create_filter = create_statespace or prefix not in self._kalman_filters
if not create_filter:
kalman_filter = self._kalman_filters[prefix]
create_filter = (
not kalman_filter.conserve_memory == conserve_memory or
not kalman_filter.loglikelihood_burn == loglikelihood_burn
)
# If the dtype-specific _kalman_filter does not exist (or if we need
# to re-create it), create it
if create_filter:
if prefix in self._kalman_filters:
# Delete the old filter
del self._kalman_filters[prefix]
# Setup the filter
cls = self.prefix_kalman_filter_map[prefix]
self._kalman_filters[prefix] = cls(
self._statespaces[prefix], filter_method, inversion_method,
stability_method, conserve_memory, filter_timing, tolerance,
loglikelihood_burn
)
# Otherwise, update the filter parameters
else:
kalman_filter = self._kalman_filters[prefix]
kalman_filter.set_filter_method(filter_method, False)
kalman_filter.inversion_method = inversion_method
kalman_filter.stability_method = stability_method
kalman_filter.filter_timing = filter_timing
kalman_filter.tolerance = tolerance
# conserve_memory and loglikelihood_burn changes always lead to
# re-created filters
return prefix, dtype, create_filter, create_statespace
def set_filter_method(self, filter_method=None, **kwargs):
r"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : int, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
The filtering method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
FILTER_CONVENTIONAL
Conventional Kalman filter.
FILTER_UNIVARIATE
Univariate approach to Kalman filtering. Overrides conventional
method if both are specified.
FILTER_COLLAPSED
Collapsed approach to Kalman filtering. Will be used *in addition*
to conventional or univariate filtering.
FILTER_CONCENTRATED
Use the concentrated log-likelihood function. Will be used
*in addition* to the other options.
Note that only the first method is available if using a Scipy version
older than 0.16.
If the bitmask is set directly via the `filter_method` argument, then
the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the filter method may also be specified by directly modifying
the class attributes which are defined similarly to the keyword
arguments.
The default filtering method is FILTER_CONVENTIONAL.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.filter_method
1
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
>>> mod.ssm.set_filter_method(filter_univariate=False,
... filter_collapsed=True)
>>> mod.ssm.filter_method
33
>>> mod.ssm.set_filter_method(filter_method=1)
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate
False
>>> mod.ssm.filter_collapsed
False
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
"""
if filter_method is not None:
self.filter_method = filter_method
for name in KalmanFilter.filter_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_inversion_method(self, inversion_method=None, **kwargs):
r"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : int, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
The inversion method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
INVERT_UNIVARIATE
If the endogenous time series is univariate, then inversion can be
performed by simple division. If this flag is set and the time
series is univariate, then division will always be used even if
other flags are also set.
SOLVE_LU
Use an LU decomposition along with a linear solver (rather than
ever actually inverting the matrix).
INVERT_LU
Use an LU decomposition along with typical matrix inversion.
SOLVE_CHOLESKY
Use a Cholesky decomposition along with a linear solver.
INVERT_CHOLESKY
            Use a Cholesky decomposition along with typical matrix inversion.
If the bitmask is set directly via the `inversion_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the inversion method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`
Several things to keep in mind are:
- If the filtering method is specified to be univariate, then simple
division is always used regardless of the dimension of the endogenous
time series.
- Cholesky decomposition is about twice as fast as LU decomposition,
but it requires that the matrix be positive definite. While this
should generally be true, it may not be in every case.
- Using a linear solver rather than true matrix inversion is generally
faster and is numerically more stable.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.inversion_method
        9
>>> mod.ssm.solve_cholesky
True
>>> mod.ssm.invert_univariate
True
>>> mod.ssm.invert_lu
False
>>> mod.ssm.invert_univariate = False
>>> mod.ssm.inversion_method
8
>>> mod.ssm.set_inversion_method(solve_cholesky=False,
... invert_cholesky=True)
>>> mod.ssm.inversion_method
16
"""
if inversion_method is not None:
self.inversion_method = inversion_method
for name in KalmanFilter.inversion_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_stability_method(self, stability_method=None, **kwargs):
r"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : int, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
The stability method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
STABILITY_FORCE_SYMMETRY = 0x01
If this flag is set, symmetry of the predicted state covariance
matrix is enforced at each iteration of the filter, where each
element is set to the average of the corresponding elements in the
upper and lower triangle.
If the bitmask is set directly via the `stability_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the stability method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default stability method is `STABILITY_FORCE_SYMMETRY`
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.stability_method
1
>>> mod.ssm.stability_force_symmetry
True
>>> mod.ssm.stability_force_symmetry = False
>>> mod.ssm.stability_method
0
"""
if stability_method is not None:
self.stability_method = stability_method
for name in KalmanFilter.stability_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_conserve_memory(self, conserve_memory=None, **kwargs):
r"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : int, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
Notes
-----
The memory conservation method is defined by a collection of boolean
flags, and is internally stored as a bitmask. The methods available
are:
MEMORY_STORE_ALL
Store all intermediate matrices. This is the default value.
MEMORY_NO_FORECAST_MEAN
Do not store the forecast or forecast errors. If this option is
used, the `predict` method from the results class is unavailable.
MEMORY_NO_FORECAST_COV
Do not store the forecast error covariance matrices.
MEMORY_NO_FORECAST
Do not store the forecast, forecast error, or forecast error
covariance matrices. If this option is used, the `predict` method
from the results class is unavailable.
MEMORY_NO_PREDICTED_MEAN
Do not store the predicted state.
MEMORY_NO_PREDICTED_COV
Do not store the predicted state covariance
matrices.
MEMORY_NO_PREDICTED
Do not store the predicted state or predicted state covariance
matrices.
MEMORY_NO_FILTERED_MEAN
Do not store the filtered state.
MEMORY_NO_FILTERED_COV
Do not store the filtered state covariance
matrices.
MEMORY_NO_FILTERED
Do not store the filtered state or filtered state covariance
matrices.
MEMORY_NO_LIKELIHOOD
Do not store the vector of loglikelihood values for each
observation. Only the sum of the loglikelihood values is stored.
MEMORY_NO_GAIN
Do not store the Kalman gain matrices.
MEMORY_NO_SMOOTHING
Do not store temporary variables related to Kalman smoothing. If
this option is used, smoothing is unavailable.
MEMORY_NO_STD_FORECAST
Do not store standardized forecast errors.
MEMORY_CONSERVE
Do not store any intermediate matrices.
If the bitmask is set directly via the `conserve_memory` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the memory conservation method may also be specified by
directly modifying the class attributes which are defined similarly to
the keyword arguments.
The default memory conservation method is `MEMORY_STORE_ALL`, so that
all intermediate matrices are stored.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.conserve_memory
0
>>> mod.ssm.memory_no_predicted
False
>>> mod.ssm.memory_no_predicted = True
>>> mod.ssm.conserve_memory
2
>>> mod.ssm.set_conserve_memory(memory_no_filtered=True,
... memory_no_forecast=True)
>>> mod.ssm.conserve_memory
7
"""
if conserve_memory is not None:
self.conserve_memory = conserve_memory
for name in KalmanFilter.memory_options:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_filter_timing(self, alternate_timing=None, **kwargs):
r"""
Set the filter timing convention
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
Parameters
----------
alternate_timing : int, optional
Whether or not to use the alternate timing convention. Default is
unspecified.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
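        Examples
        --------
        An illustrative sketch of toggling the convention (this assumes the
        usual constant values ``TIMING_INIT_PREDICTED = 0`` and
        ``TIMING_INIT_FILTERED = 1``):
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.filter_timing
        0
        >>> mod.ssm.set_filter_timing(alternate_timing=True)
        >>> mod.ssm.filter_timing
        1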
"""
if alternate_timing is not None:
self.filter_timing = int(alternate_timing)
if 'timing_init_predicted' in kwargs:
self.filter_timing = int(not kwargs['timing_init_predicted'])
if 'timing_init_filtered' in kwargs:
self.filter_timing = int(kwargs['timing_init_filtered'])
@contextlib.contextmanager
def fixed_scale(self, scale):
"""
fixed_scale(scale)
Context manager for fixing the scale when FILTER_CONCENTRATED is set
Parameters
----------
scale : numeric
Scale of the model.
Notes
-----
This a no-op if scale is None.
This context manager is most useful in models which are explicitly
concentrating out the scale, so that the set of parameters they are
estimating does not include the scale.
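        Examples
        --------
        A minimal sketch, assuming a hypothetical model ``mod`` for which the
        concentrated filter has already been enabled (otherwise passing a
        non-unit scale raises a ValueError):
        >>> mod.ssm.set_filter_method(filter_concentrated=True)
        >>> with mod.ssm.fixed_scale(2.0):
        ...     res = mod.ssm.filter()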
"""
# If a scale was provided, use it and do not concentrate it out of the
# loglikelihood
if scale is not None and scale != 1:
if not self.filter_concentrated:
raise ValueError('Cannot provide scale if filter method does'
' not include FILTER_CONCENTRATED.')
self.filter_concentrated = False
self._scale = scale
obs_cov = self['obs_cov']
state_cov = self['state_cov']
self['obs_cov'] = scale * obs_cov
self['state_cov'] = scale * state_cov
try:
yield
finally:
# If a scale was provided, reset the model
if scale is not None and scale != 1:
self['state_cov'] = state_cov
self['obs_cov'] = obs_cov
self.filter_concentrated = True
self._scale = None
def _filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
filter_timing=None, tolerance=None, loglikelihood_burn=None,
complex_step=False):
# Initialize the filter
prefix, dtype, create_filter, create_statespace = (
self._initialize_filter(
filter_method, inversion_method, stability_method,
conserve_memory, filter_timing, tolerance, loglikelihood_burn
)
)
kfilter = self._kalman_filters[prefix]
# Initialize the state
self._initialize_state(prefix=prefix, complex_step=complex_step)
# Run the filter
kfilter()
return kfilter
def filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None, filter_timing=None,
tolerance=None, loglikelihood_burn=None, complex_step=False):
r"""
Apply the Kalman filter to the statespace model.
Parameters
----------
filter_method : int, optional
Determines which Kalman filter to use. Default is conventional.
inversion_method : int, optional
Determines which inversion technique to use. Default is by Cholesky
decomposition.
stability_method : int, optional
Determines which numerical stability techniques to use. Default is
to enforce symmetry of the predicted state covariance matrix.
conserve_memory : int, optional
Determines what output from the filter to store. Default is to
store everything.
filter_timing : int, optional
Determines the timing convention of the filter. Default is that
from Durbin and Koopman (2012), in which the filter is initialized
with predicted values.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
Notes
-----
This function by default does not compute variables required for
smoothing.
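        Examples
        --------
        A sketch of a typical call on a hypothetical, fully parameterized
        model ``mod``, relying on the options previously set on the model:
        >>> res = mod.ssm.filter()
        >>> errors = res.forecasts_error      # (k_endog x nobs) array
        >>> states = res.filtered_state       # (k_states x nobs) array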
"""
# Handle memory conservation
if conserve_memory is None:
conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING
conserve_memory_cache = self.conserve_memory
self.set_conserve_memory(conserve_memory)
# Run the filter
kfilter = self._filter(
filter_method, inversion_method, stability_method, conserve_memory,
filter_timing, tolerance, loglikelihood_burn, complex_step)
# Create the results object
results = self.results_class(self)
results.update_representation(self)
results.update_filter(kfilter)
        # Reset memory conservation
self.set_conserve_memory(conserve_memory_cache)
return results
def loglike(self, **kwargs):
r"""
Calculate the loglikelihood associated with the statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Returns
-------
loglike : float
The joint loglikelihood.
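        Examples
        --------
        A sketch on a hypothetical, fully parameterized model ``mod`` (the
        value depends entirely on the data and system matrices):
        >>> llf = mod.ssm.loglike()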
"""
kwargs.setdefault('conserve_memory',
MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD)
kfilter = self._filter(**kwargs)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
if not (kwargs['conserve_memory'] & MEMORY_NO_LIKELIHOOD):
loglike = np.sum(kfilter.loglikelihood[loglikelihood_burn:])
else:
loglike = np.sum(kfilter.loglikelihood)
# Need to modify the computed log-likelihood to incorporate the
# MLE scale.
if self.filter_method & FILTER_CONCENTRATED:
d = max(loglikelihood_burn, kfilter.nobs_diffuse)
nobs_k_endog = np.sum(
self.k_endog -
np.array(self._statespace.nmissing)[d:])
# In the univariate case, we need to subtract observations
# associated with a singular forecast error covariance matrix
nobs_k_endog -= kfilter.nobs_kendog_univariate_singular
if not (kwargs['conserve_memory'] & MEMORY_NO_LIKELIHOOD):
scale = np.sum(kfilter.scale[d:]) / nobs_k_endog
else:
scale = kfilter.scale[0] / nobs_k_endog
loglike += -0.5 * nobs_k_endog
# Now need to modify this for diffuse initialization, since for
# diffuse periods we only need to add in the scale value part if
# the diffuse forecast error covariance matrix element was singular
if kfilter.nobs_diffuse > 0:
nobs_k_endog -= kfilter.nobs_kendog_diffuse_nonsingular
loglike += -0.5 * nobs_k_endog * np.log(scale)
return loglike
def loglikeobs(self, **kwargs):
r"""
Calculate the loglikelihood for each observation associated with the
statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
If `loglikelihood_burn` is positive, then the entries in the returned
loglikelihood vector are set to be zero for those initial time periods.
Returns
-------
loglike : array of float
Array of loglikelihood values for each observation.
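        Examples
        --------
        A sketch on a hypothetical, fully parameterized model ``mod``; apart
        from the burn-in periods (which are set to zero here), the
        per-observation values sum to the joint loglikelihood:
        >>> llf_obs = mod.ssm.loglikeobs()
        >>> llf = llf_obs[mod.ssm.loglikelihood_burn:].sum()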
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
if not self.filter_method & FILTER_CONCENTRATED:
kwargs.setdefault('conserve_memory',
MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD)
else:
kwargs.setdefault(
'conserve_memory',
MEMORY_CONSERVE ^ (MEMORY_NO_FORECAST | MEMORY_NO_LIKELIHOOD))
kfilter = self._filter(**kwargs)
llf_obs = np.array(kfilter.loglikelihood, copy=True)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
# If the scale was concentrated out of the log-likelihood function,
# then the llf_obs above is:
# -0.5 * k_endog * log 2 * pi - 0.5 * log |F_t|
# and we need to add in the effect of the scale:
# -0.5 * k_endog * log scale - 0.5 v' F_t^{-1} v / scale
# and note that v' F_t^{-1} is in the _kalman_filter.scale array
# Also note that we need to adjust the nobs and k_endog in both the
# denominator of the scale computation and in the llf_obs adjustment
# to take into account missing values.
if self.filter_method & FILTER_CONCENTRATED:
d = max(loglikelihood_burn, kfilter.nobs_diffuse)
nmissing = np.array(self._statespace.nmissing)
nobs_k_endog = np.sum(self.k_endog - nmissing[d:])
# In the univariate case, we need to subtract observations
# associated with a singular forecast error covariance matrix
nobs_k_endog -= kfilter.nobs_kendog_univariate_singular
scale = np.sum(kfilter.scale[d:]) / nobs_k_endog
# Need to modify this for diffuse initialization, since for
# diffuse periods we only need to add in the scale value if the
# diffuse forecast error covariance matrix element was singular
nsingular = 0
if kfilter.nobs_diffuse > 0:
d = kfilter.nobs_diffuse
Finf = kfilter.forecast_error_diffuse_cov
singular = np.diagonal(Finf).real <= kfilter.tolerance_diffuse
nsingular = np.sum(~singular, axis=1)
scale_obs = np.array(kfilter.scale, copy=True)
llf_obs += -0.5 * (
(self.k_endog - nmissing - nsingular) * np.log(scale) +
scale_obs / scale)
# Set any burned observations to have zero likelihood
llf_obs[:loglikelihood_burn] = 0
return llf_obs
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None,
pretransformed_measurement_shocks=True,
pretransformed_state_shocks=True,
pretransformed_initial_state=True,
simulator=None, return_simulator=False,
random_state=None):
r"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
            number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
pretransformed_measurement_shocks : bool, optional
If `measurement_shocks` is provided, this flag indicates whether it
should be directly used as the shocks. If False, then it is assumed
to contain draws from the standard Normal distribution that must be
transformed using the `obs_cov` covariance matrix. Default is True.
pretransformed_state_shocks : bool, optional
If `state_shocks` is provided, this flag indicates whether it
should be directly used as the shocks. If False, then it is assumed
to contain draws from the standard Normal distribution that must be
transformed using the `state_cov` covariance matrix. Default is
True.
pretransformed_initial_state : bool, optional
If `initial_state` is provided, this flag indicates whether it
should be directly used as the initial_state. If False, then it is
assumed to contain draws from the standard Normal distribution that
must be transformed using the `initial_state_cov` covariance
matrix. Default is True.
return_simulator : bool, optional
Whether or not to return the simulator object. Typically used to
improve performance when performing repeated sampling. Default is
False.
        random_state : {None, int, Generator, RandomState}, optional
            If `random_state` is None (or `np.random`), the
            `numpy.random.RandomState` singleton is used.
            If `random_state` is an int, a new ``RandomState`` instance is
            used, seeded with `random_state`.
            If `random_state` is already a ``Generator`` or ``RandomState``
            instance, then that instance is used.
Returns
-------
simulated_obs : ndarray
An (nsimulations x k_endog) array of simulated observations.
simulated_states : ndarray
An (nsimulations x k_states) array of simulated states.
simulator : SimulationSmoothResults
If `return_simulator=True`, then an instance of a simulator is
returned, which can be reused for additional simulations of the
same size.
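        Examples
        --------
        A sketch for a hypothetical, fully parameterized time-invariant model
        ``mod``; the shocks are drawn internally when not provided:
        >>> sim_obs, sim_states = mod.ssm.simulate(100)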
"""
time_invariant = self.time_invariant
# Check for valid number of simulations
if not time_invariant and nsimulations > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' simulations than there are observations.')
return self._simulate(
nsimulations,
measurement_disturbance_variates=measurement_shocks,
state_disturbance_variates=state_shocks,
initial_state_variates=initial_state,
pretransformed_measurement_disturbance_variates=(
pretransformed_measurement_shocks),
pretransformed_state_disturbance_variates=(
pretransformed_state_shocks),
pretransformed_initial_state_variates=(
pretransformed_initial_state),
simulator=simulator, return_simulator=return_simulator,
random_state=random_state)
def _simulate(self, nsimulations, simulator=None, random_state=None,
**kwargs):
raise NotImplementedError('Simulation only available through'
' the simulation smoother.')
def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
cumulative=False, direct=False):
r"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 10. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1` where `k_posdef` is the same as in the state
space model. Alternatively, a custom impulse vector may be
provided; must be a column vector with shape `(k_posdef, 1)`.
orthogonalized : bool, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : bool, optional
Whether or not to return cumulative impulse responses. Default is
False.
Returns
-------
impulse_responses : ndarray
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
TODO: add note about how for time-varying systems this is - perhaps
counter-intuitively - returning the impulse response within the given
model (i.e. starting at period 0 defined by the model) and it is *not*
doing impulse responses after the end of the model. To compute impulse
responses from arbitrary time points, it is necessary to clone a new
model with the appropriate system matrices.
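        Examples
        --------
        A sketch pulsing the first state innovation of a hypothetical, fully
        parameterized time-invariant model ``mod``:
        >>> irfs = mod.ssm.impulse_responses(steps=10, impulse=0,
        ...                                  orthogonalized=True)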
"""
# We need to add an additional step, since the first simulated value
# will always be zeros (note that we take this value out at the end).
steps += 1
# For time-invariant models, add an additional `step`. This is the
# default for time-invariant models based on the expected behavior for
# ARIMA and VAR models: we want to record the initial impulse and also
# `steps` values of the responses afterwards.
if (self._design.shape[2] == 1 and self._transition.shape[2] == 1 and
self._selection.shape[2] == 1):
steps += 1
# Check for what kind of impulse we want
if type(impulse) is int:
if impulse >= self.k_posdef or impulse < 0:
raise ValueError('Invalid value for `impulse`. Must be the'
' index of one of the state innovations.')
# Create the (non-orthogonalized) impulse vector
idx = impulse
impulse = np.zeros(self.k_posdef)
impulse[idx] = 1
else:
impulse = np.array(impulse)
if impulse.ndim > 1:
impulse = np.squeeze(impulse)
if not impulse.shape == (self.k_posdef,):
raise ValueError('Invalid impulse vector. Must be shaped'
' (%d,)' % self.k_posdef)
# Orthogonalize the impulses, if requested, using Cholesky on the
# first state covariance matrix
if orthogonalized:
state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
impulse = np.dot(state_chol, impulse)
# If we have time-varying design, transition, or selection matrices,
# then we can't produce more IRFs than we have time points
time_invariant_irf = (
self._design.shape[2] == self._transition.shape[2] ==
self._selection.shape[2] == 1)
# Note: to generate impulse responses following the end of a
# time-varying model, one should `clone` the state space model with the
# new time-varying model, and then compute the IRFs using the cloned
# model
if not time_invariant_irf and steps > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' impulse responses than there are'
' observations')
# Impulse responses only depend on the design, transition, and
# selection matrices. We set the others to zeros because they must be
# set in the call to `clone`.
# Note: we don't even need selection after the first point, because
# the state shocks will be zeros in every period except the first.
sim_model = self.clone(
endog=np.zeros((steps, self.k_endog), dtype=self.dtype),
obs_intercept=np.zeros(self.k_endog),
design=self['design', :, :, :steps],
obs_cov=np.zeros((self.k_endog, self.k_endog)),
state_intercept=np.zeros(self.k_states),
transition=self['transition', :, :, :steps],
selection=self['selection', :, :, :steps],
state_cov=np.zeros((self.k_posdef, self.k_posdef)))
# Get the impulse response function via simulation of the state
# space model, but with other shocks set to zero
measurement_shocks = np.zeros((steps, self.k_endog))
state_shocks = np.zeros((steps, self.k_posdef))
state_shocks[0] = impulse
initial_state = np.zeros((self.k_states,))
irf, _ = sim_model.simulate(
steps, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state)
# Get the cumulative response if requested
if cumulative:
irf = np.cumsum(irf, axis=0)
# Here we ignore the first value, because it is always zeros (we added
# an additional `step` at the top to account for this).
return irf[1:]
class FilterResults(FrozenRepresentation):
"""
Results from applying the Kalman filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
nobs_diffuse : int
Number of observations under the diffuse Kalman filter.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
measurement equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name,tuple
A dictionary recording the shapes of each of the
representation matrices as tuples.
endog : ndarray
The observation vector.
design : ndarray
The design matrix, :math:`Z`.
obs_intercept : ndarray
The intercept for the observation equation, :math:`d`.
obs_cov : ndarray
The covariance matrix for the observation equation :math:`H`.
transition : ndarray
The transition matrix, :math:`T`.
state_intercept : ndarray
The intercept for the transition equation, :math:`c`.
selection : ndarray
The selection matrix, :math:`R`.
state_cov : ndarray
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : str
Kalman filter initialization method.
initial_state : array_like
        The state vector used to initialize the Kalman filter.
    initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
    initial_diffuse_state_cov : array_like
        Diffuse state covariance matrix used to initialize the Kalman filter.
filter_method : int
Bitmask representing the Kalman filtering method
inversion_method : int
Bitmask representing the method used to
invert the forecast error covariance matrix.
stability_method : int
Bitmask representing the methods used to promote
numerical stability in the Kalman filter
recursions.
conserve_memory : int
Bitmask representing the selected memory conservation method.
filter_timing : int
Whether or not to use the alternate timing convention.
tolerance : float
The tolerance at which the Kalman filter
determines convergence to steady-state.
loglikelihood_burn : int
The number of initial periods during which
the loglikelihood is not recorded.
converged : bool
Whether or not the Kalman filter converged.
period_converged : int
The time period in which the Kalman filter converged.
filtered_state : ndarray
The filtered state vector at each time period.
filtered_state_cov : ndarray
The filtered state covariance matrix at each time period.
predicted_state : ndarray
The predicted state vector at each time period.
predicted_state_cov : ndarray
The predicted state covariance matrix at each time period.
    forecasts_error_diffuse_cov : ndarray
Diffuse forecast error covariance matrix at each time period.
predicted_diffuse_state_cov : ndarray
The predicted diffuse state covariance matrix at each time period.
kalman_gain : ndarray
The Kalman gain at each time period.
forecasts : ndarray
The one-step-ahead forecasts of observations at each time period.
forecasts_error : ndarray
The forecast errors at each time period.
forecasts_error_cov : ndarray
The forecast error covariance matrices at each time period.
llf_obs : ndarray
The loglikelihood values at each time period.
"""
_filter_attributes = [
'filter_method', 'inversion_method', 'stability_method',
'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn',
'converged', 'period_converged', 'filtered_state',
'filtered_state_cov', 'predicted_state', 'predicted_state_cov',
'forecasts_error_diffuse_cov', 'predicted_diffuse_state_cov',
'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts',
'forecasts_error', 'forecasts_error_cov', 'llf', 'llf_obs',
'collapsed_forecasts', 'collapsed_forecasts_error',
'collapsed_forecasts_error_cov', 'scale'
]
_filter_options = (
KalmanFilter.filter_methods + KalmanFilter.stability_methods +
KalmanFilter.inversion_methods + KalmanFilter.memory_options
)
_attributes = FrozenRepresentation._model_attributes + _filter_attributes
def __init__(self, model):
super(FilterResults, self).__init__(model)
# Setup caches for uninitialized objects
self._kalman_gain = None
self._standardized_forecasts_error = None
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : bool, optional
If set to true, only the filter options are updated, and the state
space representation is not updated. Default is False.
Notes
-----
This method is rarely required except for internal usage.
"""
if not only_options:
super(FilterResults, self).update_representation(model)
# Save the options as boolean variables
for name in self._filter_options:
setattr(self, name, getattr(model, name, None))
def update_filter(self, kalman_filter):
"""
Update the filter results
Parameters
----------
kalman_filter : statespace.kalman_filter.KalmanFilter
The model object from which to take the updated values.
Notes
-----
This method is rarely required except for internal usage.
"""
# State initialization
self.initial_state = np.array(
kalman_filter.model.initial_state, copy=True
)
self.initial_state_cov = np.array(
kalman_filter.model.initial_state_cov, copy=True
)
# Save Kalman filter parameters
self.filter_method = kalman_filter.filter_method
self.inversion_method = kalman_filter.inversion_method
self.stability_method = kalman_filter.stability_method
self.conserve_memory = kalman_filter.conserve_memory
self.filter_timing = kalman_filter.filter_timing
self.tolerance = kalman_filter.tolerance
self.loglikelihood_burn = kalman_filter.loglikelihood_burn
# Save Kalman filter output
self.converged = bool(kalman_filter.converged)
self.period_converged = kalman_filter.period_converged
self.univariate_filter = np.array(kalman_filter.univariate_filter,
copy=True)
self.filtered_state = np.array(kalman_filter.filtered_state, copy=True)
self.filtered_state_cov = np.array(
kalman_filter.filtered_state_cov, copy=True
)
self.predicted_state = np.array(
kalman_filter.predicted_state, copy=True
)
self.predicted_state_cov = np.array(
kalman_filter.predicted_state_cov, copy=True
)
# Reset caches
has_missing = np.sum(self.nmissing) > 0
if not (self.memory_no_std_forecast or self.invert_lu or
self.solve_lu or self.filter_collapsed):
if has_missing:
self._standardized_forecasts_error = np.array(
reorder_missing_vector(
kalman_filter.standardized_forecast_error,
self.missing, prefix=self.prefix))
else:
self._standardized_forecasts_error = np.array(
kalman_filter.standardized_forecast_error, copy=True)
else:
self._standardized_forecasts_error = None
# In the partially missing data case, all entries will
# be in the upper left submatrix rather than the correct placement
# Re-ordering does not make sense in the collapsed case.
if has_missing and (not self.memory_no_gain and
not self.filter_collapsed):
self._kalman_gain = np.array(reorder_missing_matrix(
kalman_filter.kalman_gain, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp1 = np.array(reorder_missing_matrix(
kalman_filter.tmp1, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp2 = np.array(reorder_missing_vector(
kalman_filter.tmp2, self.missing, prefix=self.prefix))
self.tmp3 = np.array(reorder_missing_matrix(
kalman_filter.tmp3, self.missing, reorder_rows=True,
prefix=self.prefix))
self.tmp4 = np.array(reorder_missing_matrix(
kalman_filter.tmp4, self.missing, reorder_cols=True,
reorder_rows=True, prefix=self.prefix))
else:
if not self.memory_no_gain:
self._kalman_gain = np.array(
kalman_filter.kalman_gain, copy=True)
self.tmp1 = np.array(kalman_filter.tmp1, copy=True)
self.tmp2 = np.array(kalman_filter.tmp2, copy=True)
self.tmp3 = np.array(kalman_filter.tmp3, copy=True)
self.tmp4 = np.array(kalman_filter.tmp4, copy=True)
self.M = np.array(kalman_filter.M, copy=True)
self.M_diffuse = np.array(kalman_filter.M_inf, copy=True)
        # Note: use forecasts rather than forecast, so as not to interfere
# with the `forecast` methods in subclasses
self.forecasts = np.array(kalman_filter.forecast, copy=True)
self.forecasts_error = np.array(
kalman_filter.forecast_error, copy=True
)
self.forecasts_error_cov = np.array(
kalman_filter.forecast_error_cov, copy=True
)
# Note: below we will set self.llf, and in the memory_no_likelihood
# case we will replace self.llf_obs = None at that time.
self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)
# Diffuse objects
self.nobs_diffuse = kalman_filter.nobs_diffuse
self.initial_diffuse_state_cov = None
self.forecasts_error_diffuse_cov = None
self.predicted_diffuse_state_cov = None
if self.nobs_diffuse > 0:
self.initial_diffuse_state_cov = np.array(
kalman_filter.model.initial_diffuse_state_cov, copy=True)
self.predicted_diffuse_state_cov = np.array(
kalman_filter.predicted_diffuse_state_cov, copy=True)
if has_missing and not self.filter_collapsed:
self.forecasts_error_diffuse_cov = np.array(
reorder_missing_matrix(
kalman_filter.forecast_error_diffuse_cov,
self.missing, reorder_cols=True, reorder_rows=True,
prefix=self.prefix))
else:
self.forecasts_error_diffuse_cov = np.array(
kalman_filter.forecast_error_diffuse_cov, copy=True)
# If there was missing data, save the original values from the Kalman
# filter output, since below will set the values corresponding to
# the missing observations to nans.
self.missing_forecasts = None
self.missing_forecasts_error = None
self.missing_forecasts_error_cov = None
if np.sum(self.nmissing) > 0:
# Copy the provided arrays (which are as the Kalman filter dataset)
# into new variables
self.missing_forecasts = np.copy(self.forecasts)
self.missing_forecasts_error = np.copy(self.forecasts_error)
self.missing_forecasts_error_cov = (
np.copy(self.forecasts_error_cov)
)
# Save the collapsed values
self.collapsed_forecasts = None
self.collapsed_forecasts_error = None
self.collapsed_forecasts_error_cov = None
if self.filter_collapsed:
# Copy the provided arrays (which are from the collapsed dataset)
# into new variables
self.collapsed_forecasts = self.forecasts[:self.k_states, :]
self.collapsed_forecasts_error = (
self.forecasts_error[:self.k_states, :]
)
self.collapsed_forecasts_error_cov = (
self.forecasts_error_cov[:self.k_states, :self.k_states, :]
)
# Recreate the original arrays (which should be from the original
# dataset) in the appropriate dimension
dtype = self.collapsed_forecasts.dtype
self.forecasts = np.zeros((self.k_endog, self.nobs), dtype=dtype)
self.forecasts_error = np.zeros((self.k_endog, self.nobs),
dtype=dtype)
self.forecasts_error_cov = (
np.zeros((self.k_endog, self.k_endog, self.nobs), dtype=dtype)
)
# Fill in missing values in the forecast, forecast error, and
# forecast error covariance matrix (this is required due to how the
# Kalman filter implements observations that are either partly or
# completely missing)
# Construct the predictions, forecasts
can_compute_mean = not (self.memory_no_forecast_mean or
self.memory_no_predicted_mean)
can_compute_cov = not (self.memory_no_forecast_cov or
self.memory_no_predicted_cov)
if can_compute_mean or can_compute_cov:
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
# For completely missing observations, the Kalman filter will
# produce forecasts, but forecast errors and the forecast
# error covariance matrix will be zeros - make them nan to
# improve clarity of results.
if self.nmissing[t] > 0:
mask = ~self.missing[:, t].astype(bool)
# We can recover forecasts
# For partially missing observations, the Kalman filter
# will produce all elements (forecasts, forecast errors,
# forecast error covariance matrices) as usual, but their
# dimension will only be equal to the number of non-missing
# elements, and their location in memory will be in the
# first blocks (e.g. for the forecasts_error, the first
# k_endog - nmissing[t] columns will be filled in),
# regardless of which endogenous variables they refer to
# (i.e. the non- missing endogenous variables for that
# observation). Furthermore, the forecast error covariance
# matrix is only valid for those elements. What is done is
# to set all elements to nan for these observations so that
# they are flagged as missing. The variables
# missing_forecasts, etc. then provide the forecasts, etc.
# provided by the Kalman filter, from which the data can be
# retrieved if desired.
if can_compute_mean:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t],
self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = np.nan
self.forecasts_error[mask, t] = (
self.endog[mask, t] - self.forecasts[mask, t])
# TODO: We should only fill in the non-masked elements of
# this array. Also, this will give the multivariate version
# even if univariate filtering was selected. Instead, we
# should use the reordering methods and then replace the
# masked values with NaNs
if can_compute_cov:
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# In the collapsed case, everything just needs to be rebuilt
# for the original observed data, since the Kalman filter
# produced these values for the collapsed data.
elif self.filter_collapsed:
if can_compute_mean:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t],
self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = (
self.endog[:, t] - self.forecasts[:, t]
)
if can_compute_cov:
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# Note: if we concentrated out the scale, need to adjust the
# loglikelihood values and all of the covariance matrices and the
# values that depend on the covariance matrices
# Note: concentrated computation is not permitted with collapsed
# version, so we do not need to modify collapsed arrays.
self.scale = 1.
if self.filter_concentrated and self.model._scale is None:
d = max(self.loglikelihood_burn, self.nobs_diffuse)
# Compute the scale
nmissing = np.array(kalman_filter.model.nmissing)
nobs_k_endog = np.sum(self.k_endog - nmissing[d:])
# In the univariate case, we need to subtract observations
# associated with a singular forecast error covariance matrix
nobs_k_endog -= kalman_filter.nobs_kendog_univariate_singular
scale_obs = np.array(kalman_filter.scale, copy=True)
if not self.memory_no_likelihood:
self.scale = np.sum(scale_obs[d:]) / nobs_k_endog
else:
self.scale = scale_obs[0] / nobs_k_endog
# Need to modify this for diffuse initialization, since for
# diffuse periods we only need to add in the scale value if the
# diffuse forecast error covariance matrix element was singular
nsingular = 0
if kalman_filter.nobs_diffuse > 0:
Finf = kalman_filter.forecast_error_diffuse_cov
singular = (np.diagonal(Finf).real <=
kalman_filter.tolerance_diffuse)
nsingular = np.sum(~singular, axis=1)
# Adjust the loglikelihood obs (see `KalmanFilter.loglikeobs` for
# defaults on the adjustment)
if not self.memory_no_likelihood:
self.llf_obs += -0.5 * (
(self.k_endog - nmissing - nsingular) * np.log(self.scale)
+ scale_obs / self.scale)
else:
self.llf_obs[0] += -0.5 * np.squeeze(
np.sum(
(self.k_endog - nmissing - nsingular)
* np.log(self.scale)
)
+ scale_obs / self.scale
)
# Scale the filter output
self.obs_cov = self.obs_cov * self.scale
self.state_cov = self.state_cov * self.scale
self.initial_state_cov = self.initial_state_cov * self.scale
self.predicted_state_cov = self.predicted_state_cov * self.scale
self.filtered_state_cov = self.filtered_state_cov * self.scale
self.forecasts_error_cov = self.forecasts_error_cov * self.scale
if self.missing_forecasts_error_cov is not None:
self.missing_forecasts_error_cov = (
self.missing_forecasts_error_cov * self.scale)
# Note: do not have to adjust the Kalman gain or tmp4
self.tmp1 = self.tmp1 * self.scale
self.tmp2 = self.tmp2 / self.scale
self.tmp3 = self.tmp3 / self.scale
if not (self.memory_no_std_forecast or
self.invert_lu or
self.solve_lu or
self.filter_collapsed):
self._standardized_forecasts_error = (
self._standardized_forecasts_error / self.scale**0.5)
# The self.model._scale value is only not None within a fixed_scale
# context, in which case it is set and indicates that we should
# generally view this results object as using a concentrated scale
# (e.g. for d.o.f. computations), but because the fixed scale was
# actually applied to the model prior to filtering, we do not need to
# make any adjustments to the filter output, etc.
elif self.model._scale is not None:
self.filter_concentrated = True
self.scale = self.model._scale
# Now, save self.llf, and handle the memory_no_likelihood case
if not self.memory_no_likelihood:
self.llf = np.sum(self.llf_obs[self.loglikelihood_burn:])
else:
self.llf = self.llf_obs[0]
self.llf_obs = None
@property
def kalman_gain(self):
"""
Kalman gain matrices
"""
if self._kalman_gain is None:
# k x n
self._kalman_gain = np.zeros(
(self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
for t in range(self.nobs):
# In the case of entirely missing observations, let the Kalman
# gain be zeros.
if self.nmissing[t] == self.k_endog:
continue
design_t = 0 if self.design.shape[2] == 1 else t
transition_t = 0 if self.transition.shape[2] == 1 else t
if self.nmissing[t] == 0:
self._kalman_gain[:, :, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[:, :, design_t]),
np.linalg.inv(self.forecasts_error_cov[:, :, t])
)
)
else:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
self._kalman_gain[:, mask, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[mask, :, design_t]),
np.linalg.inv(F[:, :, 0])
)
)
return self._kalman_gain
@property
def standardized_forecasts_error(self):
r"""
Standardized forecast errors
Notes
-----
The forecast errors produced by the Kalman filter are
.. math::
v_t \sim N(0, F_t)
Hypothesis tests are usually applied to the standardized residuals
.. math::
v_t^s = B_t v_t \sim N(0, I)
where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then
:math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t`
and :math:`L_t` are lower triangular. Finally,
:math:`B_t v_t \sim N(0, B_t F_t B_t')` and
:math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`.
Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or
:math:`L_t v_t^s = v_t`; the latter equation is the form required to
use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is
lower triangular, we can use a triangular solver (?TRTRS).
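        For example, a minimal sketch of the per-period computation, using a
        hypothetical 2 x 2 forecast error covariance matrix:
        >>> import numpy as np
        >>> from scipy import linalg
        >>> F_t = np.array([[2.0, 0.5], [0.5, 1.0]])   # forecast error cov
        >>> v_t = np.array([0.3, -0.1])                # forecast error
        >>> L_t = linalg.cholesky(F_t, lower=True)     # F_t = L_t L_t'
        >>> v_std = linalg.solve_triangular(L_t, v_t, lower=True)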
"""
if (self._standardized_forecasts_error is None
and not self.memory_no_forecast):
if self.k_endog == 1:
self._standardized_forecasts_error = (
self.forecasts_error /
self.forecasts_error_cov[0, 0, :]**0.5)
else:
from scipy import linalg
self._standardized_forecasts_error = np.zeros(
self.forecasts_error.shape, dtype=self.dtype)
for t in range(self.forecasts_error_cov.shape[2]):
if self.nmissing[t] > 0:
self._standardized_forecasts_error[:, t] = np.nan
if self.nmissing[t] < self.k_endog:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
try:
upper, _ = linalg.cho_factor(F[:, :, 0])
self._standardized_forecasts_error[mask, t] = (
linalg.solve_triangular(
upper, self.forecasts_error[mask, t],
trans=1))
except linalg.LinAlgError:
self._standardized_forecasts_error[mask, t] = (
np.nan)
return self._standardized_forecasts_error
def predict(self, start=None, end=None, dynamic=None, **kwargs):
r"""
In-sample and out-of-sample prediction for state space models generally
Parameters
----------
start : int, optional
Zero-indexed observation number at which to start prediction, i.e.,
the first prediction will be at start.
end : int, optional
Zero-indexed observation number at which to end prediction, i.e.,
the last prediction will be at end.
dynamic : int, optional
Offset relative to `start` at which to begin dynamic prediction.
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, predicted endogenous values will be used
instead.
**kwargs
If the prediction range is outside of the sample range, any
of the state space representation matrices that are time-varying
must have updated values provided for the out-of-sample range.
            For example, if `obs_intercept` is a time-varying component and
the prediction range extends 10 periods beyond the end of the
sample, a (`k_endog` x 10) matrix must be provided with the new
intercept values.
Returns
-------
results : kalman_filter.PredictionResults
A PredictionResults object.
Notes
-----
All prediction is performed by applying the deterministic part of the
measurement equation using the predicted state variables.
Out-of-sample prediction first applies the Kalman filter to missing
data for the number of periods desired to obtain the predicted states.
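        Examples
        --------
        A sketch of out-of-sample forecasting from hypothetical filter
        results ``res`` of a time-invariant model:
        >>> pred = res.predict(start=0, end=res.nobs + 5)
        >>> forecasts = pred.forecasts   # includes the 5 out-of-sample steps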
"""
# Get the start and the end of the entire prediction range
if start is None:
start = 0
elif start < 0:
raise ValueError('Cannot predict values previous to the sample.')
if end is None:
end = self.nobs
        # Prediction and forecasting are performed by iterating the Kalman
        # filter through the entire range [0, end]
# Then, everything is returned corresponding to the range [start, end].
# In order to perform the calculations, the range is separately split
# up into the following categories:
# - static: (in-sample) the Kalman filter is run as usual
# - dynamic: (in-sample) the Kalman filter is run, but on missing data
# - forecast: (out-of-sample) the Kalman filter is run, but on missing
# data
# Short-circuit if end is before start
if end <= start:
raise ValueError('End of prediction must be after start.')
# Get the number of forecasts to make after the end of the sample
nforecast = max(0, end - self.nobs)
# Get the number of dynamic prediction periods
# If `dynamic=True`, then assume that we want to begin dynamic
# prediction at the start of the sample prediction.
if dynamic is True:
dynamic = 0
# If `dynamic=False`, then assume we want no dynamic prediction
if dynamic is False:
dynamic = None
# Check validity of dynamic and warn or error if issues
dynamic, ndynamic = _check_dynamic(dynamic, start, end, self.nobs)
# Get the number of in-sample static predictions
if dynamic is None:
nstatic = min(end, self.nobs) - min(start, self.nobs)
else:
# (use max(., 0), since dynamic can be prior to start)
nstatic = max(dynamic - start, 0)
# Cannot do in-sample prediction if we do not have appropriate
# arrays (we can do out-of-sample forecasting, however)
if nstatic > 0 and self.memory_no_forecast_mean:
raise ValueError('In-sample prediction is not available if memory'
' conservation has been used to avoid storing'
' forecast means.')
# Cannot do dynamic in-sample prediction if we do not have appropriate
# arrays (we can do out-of-sample forecasting, however)
if ndynamic > 0 and self.memory_no_predicted:
raise ValueError('In-sample dynamic prediction is not available if'
' memory conservation has been used to avoid'
' storing forecasted or predicted state means'
' or covariances.')
# Construct the predicted state and covariance matrix for each time
# period depending on whether that time period corresponds to
# one-step-ahead prediction, dynamic prediction, or out-of-sample
# forecasting.
# If we only have simple prediction, then we can use the already saved
# Kalman filter output
if ndynamic == 0 and nforecast == 0:
results = self
oos_results = None
# If we have dynamic prediction or forecasting, then we need to
# re-apply the Kalman filter
else:
# Figure out the period for which we need to run the Kalman filter
if dynamic is not None:
kf_start = min(dynamic, self.nobs)
else:
kf_start = self.nobs
kf_end = end
# Make start, end consistent with the results that we're generating
# start = max(start - kf_start, 0)
# end = kf_end - kf_start
# We must at least store forecasts and predictions
kwargs['conserve_memory'] = (
self.conserve_memory & ~MEMORY_NO_FORECAST &
~MEMORY_NO_PREDICTED)
# Can't use Chandrasekhar recursions for prediction
kwargs['filter_method'] = (
self.model.filter_method & ~FILTER_CHANDRASEKHAR)
# TODO: there is a corner case here when the filter has not
# exited the diffuse filter, in which case this known
# initialization is not correct.
# Even if we have not stored all predicted values (means and covs),
# we can still do pure out-of-sample forecasting because we will
# always have stored the last predicted values. In this case, we
# will initialize the forecasting filter with these values
if self.memory_no_predicted:
constant = self.predicted_state[..., -1]
stationary_cov = self.predicted_state_cov[..., -1]
# Otherwise initialize with the predicted state / cov from the
# existing results, at index kf_start (note that the time
# dimension of predicted_state and predicted_state_cov is
# self.nobs + 1; so e.g. in the case of pure forecasting we should
# be using the very last predicted state and predicted state cov
# elements, and kf_start will equal self.nobs which is correct)
else:
constant = self.predicted_state[..., kf_start]
stationary_cov = self.predicted_state_cov[..., kf_start]
kwargs.update({'initialization': 'known',
'constant': constant,
'stationary_cov': stationary_cov})
# Construct the new endogenous array.
endog = np.zeros((nforecast, self.k_endog)) * np.nan
model = self.model.extend(
endog, start=kf_start, end=kf_end - nforecast, **kwargs)
# Have to retroactively modify the model's endog
if ndynamic > 0:
model.endog[:, -(ndynamic + nforecast):] = np.nan
with model.fixed_scale(self.scale):
oos_results = model.filter()
results = self
return PredictionResults(results, start, end, nstatic, ndynamic,
nforecast, oos_results=oos_results)
class PredictionResults(FilterResults):
r"""
Results of in-sample and out-of-sample prediction for state space models
generally
Parameters
----------
results : FilterResults
Output from filtering, corresponding to the prediction desired
start : int
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
nstatic : int
Number of in-sample static predictions (these are always the first
elements of the prediction output).
ndynamic : int
Number of in-sample dynamic predictions (these always follow the static
predictions directly, and are directly followed by the forecasts).
nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
predictions directly).
Attributes
----------
npredictions : int
Number of observations in the predicted series; this is not necessarily
the same as the number of observations in the original model from which
prediction was performed.
start : int
Zero-indexed observation number at which to start prediction,
i.e., the first predict will be at `start`; this is relative to the
original model from which prediction was performed.
end : int
Zero-indexed observation number at which to end prediction,
i.e., the last predict will be at `end`; this is relative to the
original model from which prediction was performed.
nstatic : int
Number of in-sample static predictions.
ndynamic : int
Number of in-sample dynamic predictions.
nforecast : int
        Number of out-of-sample forecasts.
endog : ndarray
The observation vector.
design : ndarray
The design matrix, :math:`Z`.
obs_intercept : ndarray
The intercept for the observation equation, :math:`d`.
obs_cov : ndarray
The covariance matrix for the observation equation :math:`H`.
transition : ndarray
The transition matrix, :math:`T`.
state_intercept : ndarray
The intercept for the transition equation, :math:`c`.
selection : ndarray
The selection matrix, :math:`R`.
state_cov : ndarray
The covariance matrix for the state equation :math:`Q`.
filtered_state : ndarray
The filtered state vector at each time period.
filtered_state_cov : ndarray
The filtered state covariance matrix at each time period.
predicted_state : ndarray
The predicted state vector at each time period.
predicted_state_cov : ndarray
The predicted state covariance matrix at each time period.
forecasts : ndarray
The one-step-ahead forecasts of observations at each time period.
forecasts_error : ndarray
The forecast errors at each time period.
forecasts_error_cov : ndarray
The forecast error covariance matrices at each time period.
Notes
-----
    The provided ranges must be conformable, i.e. it must hold that
    `end - start == nstatic + ndynamic + nforecast`.
This class is essentially a view to the FilterResults object, but
returning the appropriate ranges for everything.
"""
representation_attributes = [
'endog', 'design', 'obs_intercept',
'obs_cov', 'transition', 'state_intercept', 'selection',
'state_cov'
]
filter_attributes = [
'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov'
]
smoother_attributes = [
'smoothed_state', 'smoothed_state_cov',
]
def __init__(self, results, start, end, nstatic, ndynamic, nforecast,
oos_results=None):
# Save the filter results object
self.results = results
self.oos_results = oos_results
# Save prediction ranges
        self.npredictions = end - start
self.start = start
self.end = end
self.nstatic = nstatic
self.ndynamic = ndynamic
self.nforecast = nforecast
self._predicted_signal = None
self._predicted_signal_cov = None
self._filtered_signal = None
self._filtered_signal_cov = None
self._smoothed_signal = None
self._smoothed_signal_cov = None
self._filtered_forecasts = None
        self._filtered_forecasts_cov = None
self._smoothed_forecasts = None
        self._smoothed_forecasts_cov = None
def clear(self):
attributes = (['endog'] + self.representation_attributes
+ self.filter_attributes)
for attr in attributes:
_attr = '_' + attr
if hasattr(self, _attr):
delattr(self, _attr)
def __getattr__(self, attr):
"""
Provide access to the representation and filtered output in the
appropriate range (`start` - `end`).
"""
# Prevent infinite recursive lookups
if attr[0] == '_':
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
_attr = '_' + attr
# Cache the attribute
if not hasattr(self, _attr):
if attr == 'endog' or attr in self.filter_attributes:
# Get a copy
value = getattr(self.results, attr).copy()
if self.ndynamic > 0:
end = self.end - self.ndynamic - self.nforecast
value = value[..., :end]
if self.oos_results is not None:
oos_value = getattr(self.oos_results, attr).copy()
# Note that the last element of the results predicted state
# and state cov will overlap with the first element of the
# oos predicted state and state cov, so eliminate the
# last element of the results versions
# But if we have dynamic prediction, then we have already
# eliminated the last element of the predicted state, so
# we do not need to do it here.
if self.ndynamic == 0 and attr[:9] == 'predicted':
value = value[..., :-1]
value = np.concatenate([value, oos_value], axis=-1)
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.smoother_attributes:
if self.ndynamic > 0:
raise NotImplementedError(
'Cannot retrieve smoothed attributes when using'
' dynamic prediction, since the information set used'
' to compute the smoothed results differs from the'
' information set implied by the dynamic prediction.')
# Get a copy
value = getattr(self.results, attr).copy()
# The oos_results object is only dynamic or out-of-sample,
# so filtered == smoothed
if self.oos_results is not None:
filtered_attr = 'filtered' + attr[8:]
oos_value = getattr(self.oos_results, filtered_attr).copy()
value = np.concatenate([value, oos_value], axis=-1)
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.representation_attributes:
value = getattr(self.results, attr).copy()
# If a time-invariant matrix, return it. Otherwise, subset to
# the correct period.
if value.shape[-1] == 1:
value = value[..., 0]
else:
if self.ndynamic > 0:
end = self.end - self.ndynamic - self.nforecast
value = value[..., :end]
if self.oos_results is not None:
oos_value = getattr(self.oos_results, attr).copy()
value = np.concatenate([value, oos_value], axis=-1)
value = value[..., self.start:self.end]
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
setattr(self, _attr, value)
return getattr(self, _attr)
def _compute_forecasts(self, states, states_cov, signal_only=False):
d = self.obs_intercept
Z = self.design
H = self.obs_cov
if d.ndim == 1:
d = d[:, None]
if Z.ndim == 2:
if not signal_only:
forecasts = d + Z @ states
forecasts_error_cov = (
Z[None, ...] @ states_cov.T @ Z.T[None, ...] + H.T).T
else:
forecasts = Z @ states
forecasts_error_cov = (
Z[None, ...] @ states_cov.T @ Z.T[None, ...]).T
else:
if not signal_only:
forecasts = d + (Z * states[None, :, :]).sum(axis=1)
tmp = Z[:, None, ...] * states_cov[None, ...]
tmp = (tmp[:, :, :, None, :]
* Z.transpose(1, 0, 2)[None, :, None, ...])
forecasts_error_cov = (tmp.sum(axis=1).sum(axis=1).T + H.T).T
else:
forecasts = (Z * states[None, :, :]).sum(axis=1)
tmp = Z[:, None, ...] * states_cov[None, ...]
tmp = (tmp[:, :, :, None, :]
* Z.transpose(1, 0, 2)[None, :, None, ...])
forecasts_error_cov = tmp.sum(axis=1).sum(axis=1)
return forecasts, forecasts_error_cov
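    # Note (illustrative, not in the original source): for a time-invariant design
    # matrix the branch above computes, for each period t,
    #     forecasts[:, t] = d + Z @ states[:, t]
    #     forecasts_error_cov[:, :, t] = Z @ states_cov[:, :, t] @ Z.T + H
    # i.e. the observation equation applied to the supplied state moments, with H
    # dropped when signal_only=True.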
@property
def predicted_signal(self):
if self._predicted_signal is None:
self._predicted_signal, self._predicted_signal_cov = (
self._compute_forecasts(self.predicted_state,
self.predicted_state_cov,
signal_only=True))
return self._predicted_signal
@property
def predicted_signal_cov(self):
if self._predicted_signal_cov is None:
self._predicted_signal, self._predicted_signal_cov = (
self._compute_forecasts(self.predicted_state,
self.predicted_state_cov,
signal_only=True))
return self._predicted_signal_cov
@property
def filtered_signal(self):
if self._filtered_signal is None:
self._filtered_signal, self._filtered_signal_cov = (
self._compute_forecasts(self.filtered_state,
self.filtered_state_cov,
signal_only=True))
return self._filtered_signal
@property
def filtered_signal_cov(self):
if self._filtered_signal_cov is None:
self._filtered_signal, self._filtered_signal_cov = (
self._compute_forecasts(self.filtered_state,
self.filtered_state_cov,
signal_only=True))
return self._filtered_signal_cov
@property
def smoothed_signal(self):
if self._smoothed_signal is None:
self._smoothed_signal, self._smoothed_signal_cov = (
self._compute_forecasts(self.smoothed_state,
self.smoothed_state_cov,
signal_only=True))
return self._smoothed_signal
@property
def smoothed_signal_cov(self):
if self._smoothed_signal_cov is None:
self._smoothed_signal, self._smoothed_signal_cov = (
self._compute_forecasts(self.smoothed_state,
self.smoothed_state_cov,
signal_only=True))
return self._smoothed_signal_cov
@property
def filtered_forecasts(self):
if self._filtered_forecasts is None:
self._filtered_forecasts, self._filtered_forecasts_cov = (
self._compute_forecasts(self.filtered_state,
self.filtered_state_cov))
return self._filtered_forecasts
@property
def filtered_forecasts_error_cov(self):
if self._filtered_forecasts_cov is None:
self._filtered_forecasts, self._filtered_forecasts_cov = (
self._compute_forecasts(self.filtered_state,
self.filtered_state_cov))
return self._filtered_forecasts_cov
@property
def smoothed_forecasts(self):
if self._smoothed_forecasts is None:
self._smoothed_forecasts, self._smoothed_forecasts_cov = (
self._compute_forecasts(self.smoothed_state,
self.smoothed_state_cov))
return self._smoothed_forecasts
@property
def smoothed_forecasts_error_cov(self):
if self._smoothed_forecasts_cov is None:
self._smoothed_forecasts, self._smoothed_forecasts_cov = (
self._compute_forecasts(self.smoothed_state,
self.smoothed_state_cov))
return self._smoothed_forecasts_cov
def _check_dynamic(dynamic, start, end, nobs):
"""
Verify dynamic and warn or error if issues
Parameters
----------
dynamic : {int, None}
The offset relative to start of the dynamic forecasts. None if no
dynamic forecasts are required.
start : int
The location of the first forecast.
end : int
The location of the final forecast (inclusive).
nobs : int
The number of observations in the time series.
Returns
-------
dynamic : {int, None}
The start location of the first dynamic forecast. None if there
are no in-sample dynamic forecasts.
ndynamic : int
The number of dynamic forecasts
"""
if dynamic is None:
return dynamic, 0
# Replace the relative dynamic offset with an absolute offset
dynamic = start + dynamic
# Validate the `dynamic` parameter
if dynamic < 0:
raise ValueError('Dynamic prediction cannot begin prior to the'
' first observation in the sample.')
elif dynamic > end:
warn('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.', ValueWarning)
return None, 0
elif dynamic > nobs:
warn('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.', ValueWarning)
return None, 0
# Get the total size of the desired dynamic forecasting component
# Note: the first `dynamic` periods of prediction are actually
# *not* dynamic, because dynamic prediction begins at observation
# `dynamic`.
ndynamic = max(0, min(end, nobs) - dynamic)
return dynamic, ndynamic
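# Illustrative worked examples for `_check_dynamic` (not part of the original
# module); the return values below follow directly from the code above, assuming
# nobs = 100:
#
#     _check_dynamic(None, start=50, end=120, nobs=100)  # -> (None, 0): no dynamic prediction
#     _check_dynamic(5, start=0, end=100, nobs=100)      # -> (5, 95): dynamic from period 5 onward
#     _check_dynamic(30, start=90, end=130, nobs=100)    # -> (None, 0) with a ValueWarning,
#                                                        #    since 90 + 30 starts out of sample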
|
d2abb47391a19ded0c87acb301170b4d2e1b3e16
|
019f03d6713a2bc5344b644aeb5ebe70aaf7cfd0
|
/src/super_gradients/common/registry/registry.py
|
95da751cb63eed4c9bebec60e0e38a786bd974f8
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
Deci-AI/super-gradients
|
6f52cd15bc2f9f39e3cdc6067292b6512aba5dd0
|
7240726cf6425b53a26ed2faec03672f30fee6be
|
refs/heads/master
| 2023-08-25T17:47:02.595029
| 2023-08-24T11:50:50
| 2023-08-24T11:50:50
| 432,652,408
| 3,237
| 331
|
Apache-2.0
| 2023-09-14T11:24:46
| 2021-11-28T07:58:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,634
|
py
|
registry.py
|
import inspect
from typing import Callable, Dict, Optional
import torch
from torch import nn, optim
import torchvision
from super_gradients.common.object_names import Losses, Transforms, Samplers, Optimizers
def create_register_decorator(registry: Dict[str, Callable]) -> Callable:
"""
    Create a decorator that registers objects of a specified type (model, metric, ...).
    :param registry: Dict of registered objects (maps a registration name to the registered callable)
:return: Register function
"""
def register(name: Optional[str] = None) -> Callable:
"""
Set up a register decorator.
:param name: If specified, the decorated object will be registered with this name.
:return: Decorator that registers the callable.
"""
def decorator(cls: Callable) -> Callable:
"""Register the decorated callable"""
cls_name = name if name is not None else cls.__name__
if cls_name in registry:
ref = registry[cls_name]
                raise Exception(f"`{cls_name}` is already registered and points to `{inspect.getmodule(ref).__name__}.{ref.__name__}`")
registry[cls_name] = cls
return cls
return decorator
return register
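# Illustrative usage sketch (not part of the original file): building a fresh
# registry with the decorator factory above; `MyNet` is a hypothetical class.
#
#     MODELS = {}
#     register_my_model = create_register_decorator(registry=MODELS)
#
#     @register_my_model("my_net")
#     class MyNet:
#         ...
#
#     assert MODELS["my_net"] is MyNet
#
# Registering another object under "my_net" afterwards would raise the Exception above.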
ARCHITECTURES = {}
register_model = create_register_decorator(registry=ARCHITECTURES)
KD_ARCHITECTURES = {}
register_kd_model = create_register_decorator(registry=KD_ARCHITECTURES)
ALL_DETECTION_MODULES = {}
register_detection_module = create_register_decorator(registry=ALL_DETECTION_MODULES)
METRICS = {}
register_metric = create_register_decorator(registry=METRICS)
LOSSES = {Losses.MSE: nn.MSELoss}
register_loss = create_register_decorator(registry=LOSSES)
ALL_DATALOADERS = {}
register_dataloader = create_register_decorator(registry=ALL_DATALOADERS)
CALLBACKS = {}
register_callback = create_register_decorator(registry=CALLBACKS)
TRANSFORMS = {
Transforms.Compose: torchvision.transforms.Compose,
Transforms.ToTensor: torchvision.transforms.ToTensor,
Transforms.PILToTensor: torchvision.transforms.PILToTensor,
Transforms.ConvertImageDtype: torchvision.transforms.ConvertImageDtype,
Transforms.ToPILImage: torchvision.transforms.ToPILImage,
Transforms.Normalize: torchvision.transforms.Normalize,
Transforms.Resize: torchvision.transforms.Resize,
Transforms.CenterCrop: torchvision.transforms.CenterCrop,
Transforms.Pad: torchvision.transforms.Pad,
Transforms.Lambda: torchvision.transforms.Lambda,
Transforms.RandomApply: torchvision.transforms.RandomApply,
Transforms.RandomChoice: torchvision.transforms.RandomChoice,
Transforms.RandomOrder: torchvision.transforms.RandomOrder,
Transforms.RandomCrop: torchvision.transforms.RandomCrop,
Transforms.RandomHorizontalFlip: torchvision.transforms.RandomHorizontalFlip,
Transforms.RandomVerticalFlip: torchvision.transforms.RandomVerticalFlip,
Transforms.RandomResizedCrop: torchvision.transforms.RandomResizedCrop,
Transforms.FiveCrop: torchvision.transforms.FiveCrop,
Transforms.TenCrop: torchvision.transforms.TenCrop,
Transforms.LinearTransformation: torchvision.transforms.LinearTransformation,
Transforms.ColorJitter: torchvision.transforms.ColorJitter,
Transforms.RandomRotation: torchvision.transforms.RandomRotation,
Transforms.RandomAffine: torchvision.transforms.RandomAffine,
Transforms.Grayscale: torchvision.transforms.Grayscale,
Transforms.RandomGrayscale: torchvision.transforms.RandomGrayscale,
Transforms.RandomPerspective: torchvision.transforms.RandomPerspective,
Transforms.RandomErasing: torchvision.transforms.RandomErasing,
Transforms.GaussianBlur: torchvision.transforms.GaussianBlur,
Transforms.InterpolationMode: torchvision.transforms.InterpolationMode,
Transforms.RandomInvert: torchvision.transforms.RandomInvert,
Transforms.RandomPosterize: torchvision.transforms.RandomPosterize,
Transforms.RandomSolarize: torchvision.transforms.RandomSolarize,
Transforms.RandomAdjustSharpness: torchvision.transforms.RandomAdjustSharpness,
Transforms.RandomAutocontrast: torchvision.transforms.RandomAutocontrast,
Transforms.RandomEqualize: torchvision.transforms.RandomEqualize,
}
register_transform = create_register_decorator(registry=TRANSFORMS)
ALL_DATASETS = {}
register_dataset = create_register_decorator(registry=ALL_DATASETS)
ALL_PRE_LAUNCH_CALLBACKS = {}
register_pre_launch_callback = create_register_decorator(registry=ALL_PRE_LAUNCH_CALLBACKS)
BACKBONE_STAGES = {}
register_unet_backbone_stage = create_register_decorator(registry=BACKBONE_STAGES)
UP_FUSE_BLOCKS = {}
register_unet_up_block = create_register_decorator(registry=UP_FUSE_BLOCKS)
ALL_TARGET_GENERATORS = {}
register_target_generator = create_register_decorator(registry=ALL_TARGET_GENERATORS)
LR_SCHEDULERS_CLS_DICT = {}
register_lr_scheduler = create_register_decorator(registry=LR_SCHEDULERS_CLS_DICT)
LR_WARMUP_CLS_DICT = {}
register_lr_warmup = create_register_decorator(registry=LR_WARMUP_CLS_DICT)
SG_LOGGERS = {}
register_sg_logger = create_register_decorator(registry=SG_LOGGERS)
ALL_COLLATE_FUNCTIONS = {}
register_collate_function = create_register_decorator(registry=ALL_COLLATE_FUNCTIONS)
SAMPLERS = {
Samplers.DISTRIBUTED: torch.utils.data.DistributedSampler,
Samplers.SEQUENTIAL: torch.utils.data.SequentialSampler,
Samplers.SUBSET_RANDOM: torch.utils.data.SubsetRandomSampler,
Samplers.RANDOM: torch.utils.data.RandomSampler,
Samplers.WEIGHTED_RANDOM: torch.utils.data.WeightedRandomSampler,
}
register_sampler = create_register_decorator(registry=SAMPLERS)
OPTIMIZERS = {
Optimizers.SGD: optim.SGD,
Optimizers.ADAM: optim.Adam,
Optimizers.ADAMW: optim.AdamW,
Optimizers.RMS_PROP: optim.RMSprop,
}
TORCH_LR_SCHEDULERS = {
"StepLR": torch.optim.lr_scheduler.StepLR,
"LambdaLR": torch.optim.lr_scheduler.LambdaLR,
"MultiStepLR": torch.optim.lr_scheduler.MultiStepLR,
"ConstantLR": torch.optim.lr_scheduler.ConstantLR,
"CosineAnnealingLR": torch.optim.lr_scheduler.CosineAnnealingLR,
"CosineAnnealingWarmRestarts": torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,
"CyclicLR": torch.optim.lr_scheduler.CyclicLR,
"ExponentialLR": torch.optim.lr_scheduler.ExponentialLR,
"ReduceLROnPlateau": torch.optim.lr_scheduler.ReduceLROnPlateau,
"LinearLR": torch.optim.lr_scheduler.LinearLR,
}
register_optimizer = create_register_decorator(registry=OPTIMIZERS)
PROCESSINGS = {}
register_processing = create_register_decorator(registry=PROCESSINGS)
|
15e7e81ed28eccec34ea5492ade5396463dcaff8
|
725ac5a0bf72829be627bf8dc82fdc51ba0f94ae
|
/Pretrain_Model/bert-pretrain/finetuning_task_demo.py
|
9ae196fc4b98f2697edc478b742e4a4cff9de03c
|
[] |
no_license
|
shawroad/NLP_pytorch_project
|
fa14b6e4a156229765e1d552901d0492d8e1def3
|
1272fed2dc8fef78a9ded0f1ae1644d613a3b57b
|
refs/heads/master
| 2023-06-25T02:37:35.503251
| 2023-06-12T10:57:11
| 2023-06-12T10:57:11
| 229,694,655
| 530
| 104
| null | 2020-12-08T09:21:47
| 2019-12-23T06:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 806
|
py
|
finetuning_task_demo.py
|
# -*- coding: utf-8 -*-
# @Time : 2020/7/24 19:50
# @Author : xiaolu
# @FileName: finetuning_task_demo.py
# @Software: PyCharm
import torch
from transformers import BertConfig, BertModel
from transformers import BertTokenizer
config = BertConfig.from_pretrained('./corpus/config.json')
model = BertModel.from_pretrained('./corpus/pytorch_model_epoch50.bin', config=config)
tokenizer = BertTokenizer.from_pretrained('./corpus/vocab.txt')
temp = tokenizer.encode_plus('你是煞笔', '你是刹车差', pad_to_max_length=True, max_length=128)
input_ids = torch.LongTensor([temp['input_ids']])
segment_ids = torch.LongTensor([temp['token_type_ids']])
attention_mask = torch.LongTensor([temp['attention_mask']])
seq_output, _ = model(input_ids, segment_ids, attention_mask)
print(seq_output.size())
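# Note (not part of the original script): with max_length=128 the printed size is
# expected to be torch.Size([1, 128, hidden_size]), where hidden_size is taken from
# the local corpus/config.json (768 for a BERT-base style configuration).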
|
50b0f68bbcc7369926a1057ea28071fcd7832222
|
27b86f422246a78704e0e84983b2630533a47db6
|
/docs/source/tutorials/src/customdata/xrecord.py
|
421c5a81581a5094acd21aed36f925f977e4c009
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 652
|
py
|
xrecord.py
|
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
from pathlib import Path
import ezdxf
DIR = Path("~/Desktop/Outbox").expanduser()
doc = ezdxf.new()
msp = doc.modelspace()
line = msp.add_line((0, 0), (1, 0))
line2 = msp.add_line((0, 2), (1, 2))
if line.has_extension_dict:
xdict = line.get_extension_dict()
else:
xdict = line.new_extension_dict()
xrecord = xdict.add_xrecord("DATA1")
xrecord.reset([
(1, "text1"), # string
(40, 3.141592), # float
(90, 256), # 32-bit int
(10, (1, 2, 0)), # points and vectors
(330, line2.dxf.handle) # handles
])
print(xrecord.tags)
doc.saveas(DIR / "xrecord.dxf")
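# Illustrative read-back sketch (not part of the original script); key lookup on the
# extension dictionary via ["DATA1"] is an assumption about the ezdxf API:
#
#     loaded = ezdxf.readfile(DIR / "xrecord.dxf")
#     for entity in loaded.modelspace().query("LINE"):
#         if entity.has_extension_dict:
#             print(entity.get_extension_dict()["DATA1"].tags)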
|
d027e6f23e27072bbe09ec2b6af85c43a53e2250
|
f487532281c1c6a36a5c62a29744d8323584891b
|
/sdk/python/pulumi_azure/monitoring/__init__.py
|
d449512ded54e0897cd746535a8059f7f1d65ff9
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure
|
a8f8f21c46c802aecf1397c737662ddcc438a2db
|
c16962e5c4f5810efec2806b8bb49d0da960d1ea
|
refs/heads/master
| 2023-08-25T00:17:05.290397
| 2023-08-24T06:11:55
| 2023-08-24T06:11:55
| 103,183,737
| 129
| 57
|
Apache-2.0
| 2023-09-13T05:44:10
| 2017-09-11T20:19:15
|
Java
|
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
__init__.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .aad_diagnostic_setting import *
from .action_group import *
from .action_rule_action_group import *
from .action_rule_suppression import *
from .activity_log_alert import *
from .alert_processing_rule_action_group import *
from .alert_processing_rule_suppression import *
from .alert_prometheus_rule_group import *
from .autoscale_setting import *
from .data_collection_endpoint import *
from .data_collection_rule import *
from .data_collection_rule_association import *
from .diagnostic_setting import *
from .get_action_group import *
from .get_data_collection_endpoint import *
from .get_data_collection_rule import *
from .get_diagnostic_categories import *
from .get_log_profile import *
from .get_scheduled_query_rules_alert import *
from .get_scheduled_query_rules_log import *
from .log_profile import *
from .logz_monitor import *
from .logz_sub_account import *
from .logz_sub_account_tag_rule import *
from .logz_tag_rule import *
from .metric_alert import *
from .private_link_scope import *
from .private_link_scoped_service import *
from .scheduled_query_rules_alert import *
from .scheduled_query_rules_alert_v2 import *
from .scheduled_query_rules_log import *
from .smart_detector_alert_rule import *
from .workspace import *
from ._inputs import *
from . import outputs
|
6f147121c760f21b5b66d629e69b55a6f4a49ed9
|
e688afe613829d58686529c5cf6c16e6a911b400
|
/prepare_data.py
|
0a781639d1ad40530c36e8b892b1c4fdd67eb1c7
|
[] |
no_license
|
SKRohit/Generating_Text_Summary_With_GPT2
|
818d5b98d4196e417ae4ca3fbaceb44a3bb13ae1
|
f1da9d3cc8c287d8ce31211648e691dcae1f58bf
|
refs/heads/master
| 2023-01-24T12:11:37.734048
| 2023-01-14T08:56:39
| 2023-01-14T08:56:39
| 230,740,064
| 174
| 39
| null | 2023-01-14T08:57:12
| 2019-12-29T11:21:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,568
|
py
|
prepare_data.py
|
import json
import os
import pickle
import sys
import time
from utils import add_special_tokens
#tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
dm_single_close_quote = '\u2019' # unicode
dm_double_close_quote = '\u201d'
# acceptable ways to end a sentence
END_TOKENS = ['.', '!', '?', '...', "'", "`", '"',
dm_single_close_quote, dm_double_close_quote, ")"]
def fix_missing_period(line):
"""Adds a period to a line that is missing a period"""
if "@highlight" in line:
return line
if line == "":
return line
if line[-1] in END_TOKENS:
return line
return line + " ."
def get_art_abs(lines):
    """Return the article text and the abstract (highlights) as two strings."""
    # Strip surrounding whitespace and collapse repeated internal spaces
lines = [' '.join(line.strip().split()) for line in lines]
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for idx, line in enumerate(lines):
if line == "":
continue # empty line
elif line.startswith("@highlight"):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
return ' '.join(article_lines), ' '.join(highlights)
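# Illustrative worked example (not part of the original script): a minimal
# CNN/DailyMail-style story split into paragraphs.
#
#     lines = ["First sentence", "@highlight", "Summary point"]
#     get_art_abs(lines)  # -> ("First sentence .", "Summary point .")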
def write_json(i,article, abstract):
""" Saves a json file."""
file = "./gpt2_1024_data/"+str(i)+".json"
js_example = {}
js_example['id'] = i
js_example['article'] = article
js_example['abstract'] = abstract
with open(file, 'w') as f:
json.dump(js_example, f, ensure_ascii=False)
def main(file_names, directory):
    """ Read txt files, extract articles and summaries, tokenize them, and save them as json files.
Args:
file_names: list, all the articles with total no of tokens less than 1024
directory: string, directory where files in file_names is stored
"""
tokenizer = add_special_tokens()
print("Execution Started...")
train_ids = []
file_id_map = {}
i = 0
for file in file_names:
file = os.path.join(os.getcwd(),directory,file)
with open(file,'r',encoding='utf-8') as f:
lines = f.read().split('\n\n')
article, abstract = get_art_abs(lines)
article, abstract = tokenizer.encode(article), tokenizer.encode(abstract)
if len(article)>0 and len(abstract)>0 and (len(article)+len(abstract))<=1023:
train_ids.append(i)
write_json(i,article,abstract)
file_id_map[i] = os.path.basename(file).replace('.story', '')
i += 1
if i%100==0:
print(i, " files written")
x,y = int(len(train_ids)*0.8), int(len(train_ids)*0.9)
valid_ids = train_ids[x:y]
test_ids = train_ids[y:]
train_ids = train_ids[:x]
with open("ids.json",'w') as f:
js = {}
js['train_ids'] = train_ids
js['valid_ids'] = valid_ids
js['test_ids'] = test_ids
json.dump(js,f)
# file_id_map maps the json file ids to actual cnn/dm file names ending with ".story"
print("saving file_id_map...")
with open("file_id_map.pickle", 'wb') as f:
pickle.dump(file_id_map,f)
print("file_id_map saved.")
if __name__ == '__main__':
start = time.time()
with open(sys.argv[1],'rb') as f:
file_sizes = pickle.load(f)
file_names = [file for file,size in file_sizes.items() if size<=1023] #only consider files with total no of tokens less than 1024
if sys.argv[1].startswith("cnn"):
directory = "cnn_stories_tokenized"
        os.chdir('./CNN/')
else:
directory = "dm_stories_tokenized"
os.chdir('./DM/')
main(file_names, directory)
print("total_time_taken: ", (time.time()-start)/60, " minutes")
|
abf2b29a86558bb318ada9f020549af77411cd33
|
e7d73bb0305760a233407784fe3282d81f5985c9
|
/archive/skylift/cli_scan.py
|
3d98df81eb6d5d34be3442df2499cd68963f9c8f
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
adamhrv/skylift
|
bca9f55e7b9323a35ee8aa1fe372045176224a9f
|
6d3793b736b4cbfd21c573d7a9f408574103a948
|
refs/heads/master
| 2023-04-06T18:56:48.581099
| 2023-03-27T23:23:26
| 2023-03-27T23:23:26
| 90,250,911
| 320
| 56
|
MIT
| 2022-10-07T16:54:55
| 2017-05-04T10:29:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
cli_scan.py
|
# ---------------------------------------------------------------------------
# Not yet implemented
# TODO: add OSX, Linux, Windows and instructions for iOS scanning
# ---------------------------------------------------------------------------
import os
import click
from app.models.data_types import WiFiNet
from app.utils.net_parser import NetParser
from app.utils import scan_utils, logger_utils, file_utils
# ------------------------------------------------------
# Click Group
# ------------------------------------------------------
@click.group(chain=True)
@click.option('-v', '--verbose', 'opt_verbosity', count=True, default=4,
show_default=True,
help='Verbosity: -v DEBUG, -vv INFO, -vvv WARN, -vvvv ERROR, -vvvvv CRITICAL')
@click.pass_context
def cli(ctx, opt_verbosity):
logger_utils.Logger.create(verbosity=opt_verbosity)
# ------------------------------------------------------
# Scan
# ------------------------------------------------------
@cli.command('scan')
@click.option('-o', '--output', 'opt_fp_out', required=True)
@click.option('--os', 'opt_os', default='linux',
type=click.Choice(['mac', 'linux', 'windows']))
@click.pass_context
def scan(ctx, opt_fp_out, opt_os):
"""Scan WiFi networks using local system"""
# TODO: auto detect OS
log = logger_utils.Logger.getLogger()
log.error('not yet implemented')
return
scanner = scan_utils.Scanner()
if opt_os == 'mac':
scanner.osx()
else:
log.error('"{}" scan not yet implemented'.format(opt_os))
if __name__ == '__main__':
cli()
|
e7f2e4a3c845ad8b3b849a4a22a2f3625e106d17
|
f487532281c1c6a36a5c62a29744d8323584891b
|
/sdk/python/pulumi_azure/notificationhub/_inputs.py
|
e9f61bcfa2efb3183a62b3303f500a1ea65bccfb
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure
|
a8f8f21c46c802aecf1397c737662ddcc438a2db
|
c16962e5c4f5810efec2806b8bb49d0da960d1ea
|
refs/heads/master
| 2023-08-25T00:17:05.290397
| 2023-08-24T06:11:55
| 2023-08-24T06:11:55
| 103,183,737
| 129
| 57
|
Apache-2.0
| 2023-09-13T05:44:10
| 2017-09-11T20:19:15
|
Java
|
UTF-8
|
Python
| false
| false
| 4,493
|
py
|
_inputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'HubApnsCredentialArgs',
'HubGcmCredentialArgs',
]
@pulumi.input_type
class HubApnsCredentialArgs:
def __init__(__self__, *,
application_mode: pulumi.Input[str],
bundle_id: pulumi.Input[str],
key_id: pulumi.Input[str],
team_id: pulumi.Input[str],
token: pulumi.Input[str]):
"""
:param pulumi.Input[str] application_mode: The Application Mode which defines which server the APNS Messages should be sent to. Possible values are `Production` and `Sandbox`.
:param pulumi.Input[str] bundle_id: The Bundle ID of the iOS/macOS application to send push notifications for, such as `com.org.example`.
:param pulumi.Input[str] key_id: The Apple Push Notifications Service (APNS) Key.
        :param pulumi.Input[str] team_id: The ID of the Apple Developer team that the Token belongs to.
:param pulumi.Input[str] token: The Push Token associated with the Apple Developer Account. This is the contents of the `key` downloaded from [the Apple Developer Portal](https://developer.apple.com/account/ios/authkey/) between the `-----BEGIN PRIVATE KEY-----` and `-----END PRIVATE KEY-----` blocks.
"""
pulumi.set(__self__, "application_mode", application_mode)
pulumi.set(__self__, "bundle_id", bundle_id)
pulumi.set(__self__, "key_id", key_id)
pulumi.set(__self__, "team_id", team_id)
pulumi.set(__self__, "token", token)
@property
@pulumi.getter(name="applicationMode")
def application_mode(self) -> pulumi.Input[str]:
"""
The Application Mode which defines which server the APNS Messages should be sent to. Possible values are `Production` and `Sandbox`.
"""
return pulumi.get(self, "application_mode")
@application_mode.setter
def application_mode(self, value: pulumi.Input[str]):
pulumi.set(self, "application_mode", value)
@property
@pulumi.getter(name="bundleId")
def bundle_id(self) -> pulumi.Input[str]:
"""
The Bundle ID of the iOS/macOS application to send push notifications for, such as `com.org.example`.
"""
return pulumi.get(self, "bundle_id")
@bundle_id.setter
def bundle_id(self, value: pulumi.Input[str]):
pulumi.set(self, "bundle_id", value)
@property
@pulumi.getter(name="keyId")
def key_id(self) -> pulumi.Input[str]:
"""
The Apple Push Notifications Service (APNS) Key.
"""
return pulumi.get(self, "key_id")
@key_id.setter
def key_id(self, value: pulumi.Input[str]):
pulumi.set(self, "key_id", value)
@property
@pulumi.getter(name="teamId")
def team_id(self) -> pulumi.Input[str]:
"""
        The ID of the Apple Developer team that the Token belongs to.
"""
return pulumi.get(self, "team_id")
@team_id.setter
def team_id(self, value: pulumi.Input[str]):
pulumi.set(self, "team_id", value)
@property
@pulumi.getter
def token(self) -> pulumi.Input[str]:
"""
The Push Token associated with the Apple Developer Account. This is the contents of the `key` downloaded from [the Apple Developer Portal](https://developer.apple.com/account/ios/authkey/) between the `-----BEGIN PRIVATE KEY-----` and `-----END PRIVATE KEY-----` blocks.
"""
return pulumi.get(self, "token")
@token.setter
def token(self, value: pulumi.Input[str]):
pulumi.set(self, "token", value)
@pulumi.input_type
class HubGcmCredentialArgs:
def __init__(__self__, *,
api_key: pulumi.Input[str]):
"""
:param pulumi.Input[str] api_key: The API Key associated with the Google Cloud Messaging service.
"""
pulumi.set(__self__, "api_key", api_key)
@property
@pulumi.getter(name="apiKey")
def api_key(self) -> pulumi.Input[str]:
"""
The API Key associated with the Google Cloud Messaging service.
"""
return pulumi.get(self, "api_key")
@api_key.setter
def api_key(self, value: pulumi.Input[str]):
pulumi.set(self, "api_key", value)
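# Illustrative usage sketch (not part of the generated file): constructing the APNS
# credential arguments defined above; the values are placeholders, and attaching them
# to a notification hub is assumed to go through the module's Hub resource.
#
#     apns = HubApnsCredentialArgs(
#         application_mode="Sandbox",
#         bundle_id="com.org.example",
#         key_id="<apns-key-id>",
#         team_id="<apple-developer-team-id>",
#         token="<apns-auth-token>",
#     )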
|