Dataset schema (string and list columns show min/max length, categorical columns show the number of distinct classes, numeric and timestamp columns show min/max values):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 to 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
Row 1: ChangeMyUsername/algorithms-sedgewick-python, /chapter_1/module_1_3_linked_list.py

| field | value |
|---|---|
| blob_id | 43b06401cdd5e2e6719530812e0bea8ab2e649e7 |
| directory_id | f754c7515ba6cd510748e74036a9148b7d4167c7 |
| path | /chapter_1/module_1_3_linked_list.py |
| content_id | 734272571fb3dcd3d8dc21ccc08052d5d7d15776 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | ChangeMyUsername/algorithms-sedgewick-python |
| snapshot_id | 26497e986b8133beae526946fd7565d9e1f4163b |
| revision_id | d3ccd86c93016c7fee270ad02e1a823d205cea80 |
| branch_name | refs/heads/master |
| visit_date | 2022-01-21T05:30:02.418482 |
| revision_date | 2021-12-30T02:55:29 |
| committer_date | 2021-12-30T02:55:29 |
| github_id | 35,562,449 |
| star_events_count | 330 |
| fork_events_count | 110 |
| gha_license_id | null |
| gha_event_created_at | 2021-12-30T02:55:29 |
| gha_created_at | 2015-05-13T17:06:50 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 8,854 |
| extension | py |
| filename | module_1_3_linked_list.py |

content:

```python
#!/usr/bin/env python
# -*- encoding:UTF-8 -*-
from __future__ import print_function
import doctest
from typing import Any, Union
from common import Node
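# Note: `Node` is provided by the repository's local `common` module, which is
# not part of this file. A minimal sketch consistent with how it is used below
# (an assumption, not the actual implementation) would be:
#
#     class Node(object):
#         def __init__(self, val, next_node=None):
#             self.val = val              # payload stored in this node
#             self.next_node = next_node  # reference to the next node, or None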
class LinkedList(object):
"""
Linked list practice
"""
def __init__(self) -> None:
"""
Initialize method
"""
self._first = None
self._size = 0
def print_list(self) -> None:
"""Print all elements in linked list.
>>> lst = LinkedList()
>>> for i in range(1, 5):
... lst.append(i)
...
>>> lst.print_list()
1 2 3 4
"""
tmp = self._first
while tmp:
if not tmp.next_node:
print(tmp.val)
else:
print(tmp.val, end=' ')
tmp = tmp.next_node
def append(self, val: Any) -> None:
"""Append element to linked list.
Args:
val (Any): element to be appended
>>> lst = LinkedList()
>>> for i in range(1, 5):
... lst.append(i)
...
>>> lst.size()
4
"""
if not self._first:
self._first = Node(val)
self._size += 1
return
tmp = self._first
while tmp.next_node:
tmp = tmp.next_node
tmp.next_node = Node(val)
self._size += 1
# 1.3.19 practice
def delete_last(self) -> Union[None, Any]:
"""Delete last item in linked list.
Returns:
Union[None, Any]: None if linked list is empty,
else last element in linked list
>>> lst = LinkedList()
>>> for i in range(1, 5):
... lst.append(i)
...
>>> while lst.size():
... lst.delete_last()
4
3
2
1
"""
tmp = self._first
if not tmp:
return
if not self._first.next_node:
deleted_val = self._first.val
self._first = None
self._size -= 1
return deleted_val
while tmp.next_node.next_node:
tmp = tmp.next_node
deleted_val = tmp.next_node.val
tmp.next_node = None
self._size -= 1
return deleted_val
# 1.3.21 practice
def find(self, val: Any) -> bool:
"""Find if `val` in linked list.
Args:
val (Any): element to search in linked list
Returns:
bool: True if `val` in linked list else False
>>> lst = LinkedList()
>>> for i in range(1, 5):
... lst.append(i)
...
>>> lst.find(5)
False
>>> lst.find(4)
True
>>> lst.find(1)
True
"""
tmp = self._first
while tmp:
if tmp.val == val:
return True
tmp = tmp.next_node
return False
def size(self) -> int:
"""Return the size of linked list
Returns:
int: size of linked list
"""
return self._size
# 1.3.20 practice
def delete(self, pos: int) -> None:
"""Delete element from linked list
which element in n position (1-based).
Args:
pos (int): linked list position, if `pos` > self.size(),
then do nothing.
>>> lst = LinkedList()
>>> for i in range(1, 5):
... lst.append(i)
...
>>> lst.delete(1)
>>> lst.print_list()
2 3 4
>>> lst.delete(4)
>>> lst.print_list()
2 3 4
>>> lst.delete(2)
>>> lst.print_list()
2 4
"""
        if pos < 1 or pos > self._size:
            return
if pos == 1:
self._first = self._first.next_node
self._size -= 1
return
tmp, count = self._first, 1
while count != pos - 1:
count += 1
tmp = tmp.next_node
target = tmp.next_node
tmp.next_node = tmp.next_node.next_node
target.next_node = None
self._size -= 1
# 1.3.24 practice, accept val as parameter instead of node as parameter
def remove_after(self, item: Any) -> None:
"""Remove element after `item`.
Args:
item (Any): element value in linked list
>>> lst = LinkedList()
>>> for i in range(10):
... lst.append(i)
...
>>> lst.remove_after(8)
>>> lst.remove_after(0)
>>> lst.print_list()
0 2 3 4 5 6 7 8
>>> lst.size()
8
"""
tmp = self._first
        while tmp and tmp.next_node:  # guard against an empty list
if tmp.val == item:
tmp.next_node = tmp.next_node.next_node
self._size -= 1
break
tmp = tmp.next_node
# 1.3.25 practice, accept val as parameter instead of node as parameter
def insert_after(self, current_node_item: Any, new_node_item: Any) -> None:
"""Insert `new_node_item` into linked list after `current_node_item`.
Args:
current_node_item (Any): existing element value
new_node_item (Any): new element value
>>> lst = LinkedList()
>>> for i in range(10):
... lst.append(i)
...
>>> lst.insert_after(0, 1.5)
>>> lst.print_list()
0 1.5 1 2 3 4 5 6 7 8 9
>>> lst.insert_after(9, 10)
>>> lst.print_list()
0 1.5 1 2 3 4 5 6 7 8 9 10
"""
tmp = self._first
while tmp:
if tmp.val == current_node_item:
old_next_node = tmp.next_node
new_node = Node(new_node_item)
tmp.next_node = new_node
new_node.next_node = old_next_node
self._size += 1
break
tmp = tmp.next_node
# 1.3.26 practice
def remove(self, key: Any) -> int:
"""Remove all `key` from linked list.
Args:
key (Any): element to be removed
Returns:
int: removed elements count
>>> lst = LinkedList()
>>> for i in range(10):
... lst.append(i)
>>> lst.append(8)
>>> lst.append(1)
>>> lst.remove(1)
2
>>> lst.remove(8)
2
>>> lst.print_list()
0 2 3 4 5 6 7 9
>>> lst2 = LinkedList()
>>> for i in range(5):
... lst2.append(i)
>>> lst2.remove(3)
1
>>> lst2.remove(3)
0
>>> lst3 = LinkedList()
>>> lst3.append(1)
>>> lst3.remove(1)
1
"""
        removed_num = 0
        tmp = self._first
        prev = None
        while tmp:
            if tmp.val == key:
                nxt = tmp.next_node
                if not prev:
                    # Removing the head node, so the list head must advance.
                    self._first = nxt
                else:
                    prev.next_node = nxt
                tmp.next_node = None  # detach the removed node
                tmp = nxt
                self._size -= 1
                removed_num += 1
            else:
                prev = tmp
                tmp = tmp.next_node
        return removed_num
# 1.3.27 practice
def max_value(self) -> Any:
"""Return maximum value in linked list, each value must be comparable.
Returns:
Any: maximum value in linked list, None if linked list is empty
>>> lst = LinkedList()
>>> for i in range(10):
... lst.append(i)
>>> lst.max_value()
9
"""
tmp = self._first
max_val = None
while tmp:
            if max_val is None or tmp.val > max_val:
                max_val = tmp.val
tmp = tmp.next_node
return max_val
# 1.3.30 practice
def reverse(self) -> Node:
"""Reverse linked list and return first node.
Returns:
Node: reversed first node
>>> lst = LinkedList()
>>> for i in range(10):
... lst.append(i)
>>> lst.print_list()
0 1 2 3 4 5 6 7 8 9
>>> lst.reverse().val
9
>>> lst.print_list()
9 8 7 6 5 4 3 2 1 0
"""
first = self._first
reverse_node = None
while first:
second = first.next_node
first.next_node = reverse_node
reverse_node = first
first = second
self._first = reverse_node
return reverse_node
    def is_cyclic(self) -> bool:
        """Detect whether the linked list contains a cycle,
        using Floyd's two-pointer (tortoise and hare) algorithm.
        Returns:
            bool: True if the linked list contains a cycle else False
        """
        if not self._first:
            return False
        fast = slow = self._first
        while fast and fast.next_node:
            fast = fast.next_node.next_node
            slow = slow.next_node
            if fast == slow:
                return True
        return False
if __name__ == '__main__':
doctest.testmod()
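# Hedged usage sketch: is_cyclic() is the tortoise-and-hare check, so a
# manually created cycle is detected (poking at the internal `_first` field
# is for illustration only):
#
#     lst = LinkedList()
#     for i in range(3):
#         lst.append(i)
#     lst._first.next_node.next_node.next_node = lst._first  # 2 -> 0 closes the cycle
#     assert lst.is_cyclic()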
```

Row 2: pyocd/pyOCD, /pyocd/coresight/ap.py

| field | value |
|---|---|
| blob_id | 7371f6b0417f2ddc834d47e4517b66b1c883aef4 |
| directory_id | c5fd80ede07f0972a9b99d0c65a0df40e6d487fa |
| path | /pyocd/coresight/ap.py |
| content_id | 8a37ef06422b1d6ccaac94ed81a2eccb64cb4d4a |
| detected_licenses | ["CC-BY-4.0", "Apache-2.0"] |
| license_type | permissive |
| repo_name | pyocd/pyOCD |
| snapshot_id | 46330f3a10c9be381293d220cc025e0e347513ce |
| revision_id | 9253740baf46ebf4eacbce6bf3369150c5fb8ee0 |
| branch_name | refs/heads/main |
| visit_date | 2023-08-18T07:56:54.205305 |
| revision_date | 2023-08-13T19:11:01 |
| committer_date | 2023-08-13T19:11:01 |
| github_id | 13,862,423 |
| star_events_count | 507 |
| fork_events_count | 204 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-09T20:13:57 |
| gha_created_at | 2013-10-25T14:10:05 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 56,484 |
| extension | py |
| filename | ap.py |

content:

```python
# pyOCD debugger
# Copyright (c) 2015-2020 Arm Limited
# Copyright (c) 2021-2022 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from contextlib import contextmanager
from functools import total_ordering
from enum import Enum
from typing import (Any, Callable, Dict, Generator, Optional, TYPE_CHECKING, Sequence, Set, Tuple, Type, Union, overload)
from typing_extensions import Literal
from ..core import (exceptions, memory_interface)
from ..core.target import Target
from ..utility.concurrency import locked
if TYPE_CHECKING:
from types import TracebackType
from ..core.core_target import CoreTarget
from .dap import DebugPort
from .rom_table import CoreSightComponentID
from ..utility.notification import Notification
LOG = logging.getLogger(__name__)
TRACE = LOG.getChild("trace")
TRACE.setLevel(logging.CRITICAL)
## Offset of IDR register in an APv1.
AP_IDR = 0xFC
## Offset of IDR register in an APv2.
APv2_IDR = 0xDFC
A32 = 0x0c
APSEL_SHIFT = 24
APSEL = 0xff000000
APBANKSEL = 0x000000f0
APSEL_APBANKSEL = APSEL | APBANKSEL
## @brief Mask for register address within the AP address space.
#
# v1 APs have a 256 byte register space. v2 APs have a 4 kB register space. This mask is
# larger than the APv1 register space, but this is not problematic because v1 APs only have
# the 8-bit APSEL in bits 31:24 of the address, thus no possibility of conflict.
APREG_MASK = 0x00000ffc
# AP BASE register masks
AP_BASE_FORMAT_MASK = 0x2
AP_BASE_ENTRY_PRESENT_MASK = 0x1
AP_BASE_BASEADDR_MASK = 0xfffffffc
AP_BASE_LEGACY_NOTPRESENT = 0xffffffff # Legacy not present value
AP_BASE_LEGACY_BASEADDR_MASK = 0xfffff000
# AP IDR bitfields:
# [31:28] Revision
# [27:24] JEP106 continuation (0x4 for ARM)
# [23:17] JEP106 vendor ID (0x3B for ARM)
# [16:13] Class (0b1000=Mem-AP)
# [12:8] Reserved
# [7:4] AP Variant (non-zero for JTAG-AP)
# [3:0] AP Type
AP_IDR_REVISION_MASK = 0xf0000000
AP_IDR_REVISION_SHIFT = 28
AP_IDR_JEP106_MASK = 0x0ffe0000
AP_IDR_JEP106_SHIFT = 17
AP_IDR_CLASS_MASK = 0x0001e000
AP_IDR_CLASS_SHIFT = 13
AP_IDR_VARIANT_MASK = 0x000000f0
AP_IDR_VARIANT_SHIFT = 4
AP_IDR_TYPE_MASK = 0x0000000f
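# Worked example (cross-checked against the table of known IDRs at the bottom
# of this file): IDR 0x24770011 decodes as revision=0x2, JEP106=0x23B (Arm),
# class=0x8 (MEM-AP), variant=1, type=0x1 (AHB), i.e. the Cortex-M3/M4 AHB-AP
# with 4 kB wrap and MSTRTYPE support.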
# The CoreSight ARCHID value for the CSSOC-600 APv1 Adapter.
UNKNOWN_AP_ARCHID = 0x0a47
## The control registers for a v2 MEM-AP start at an offset.
MEM_APv2_CONTROL_REG_OFFSET = 0xD00
# MEM-AP register addresses
MEM_AP_CSW = 0x00
MEM_AP_TAR = 0x04
MEM_AP_DRW = 0x0C
MEM_AP_TRR = 0x24 # Only APv2 with ERRv1
MEM_AP_BASE_HI = 0xF0
MEM_AP_CFG = 0xF4
MEM_AP_BASE = 0xF8
MEM_AP_CFG_TARINC_MASK = 0x000f0000
MEM_AP_CFG_TARINC_SHIFT = 16
MEM_AP_CFG_ERR_MASK = 0x00000f00
MEM_AP_CFG_ERR_SHIFT = 8
MEM_AP_CFG_DARSIZE_MASK = 0x000000f0
MEM_AP_CFG_DARSIZE_SHIFT = 4
MEM_AP_CFG_LD_MASK = 0x00000004
MEM_AP_CFG_LA_MASK = 0x00000002
MEM_AP_CFG_ERR_V1 = 1
MEM_AP_TRR_ERR_MASK = 0x00000001
# AP Control and Status Word definitions
CSW_SIZE = 0x00000007
CSW_SIZE8 = 0x00000000
CSW_SIZE16 = 0x00000001
CSW_SIZE32 = 0x00000002
CSW_SIZE64 = 0x00000003
CSW_SIZE128 = 0x00000004
CSW_SIZE256 = 0x00000005
CSW_ADDRINC = 0x00000030
CSW_NADDRINC = 0x00000000 # No increment
CSW_SADDRINC = 0x00000010 # Single increment by SIZE field
CSW_PADDRINC = 0x00000020 # Packed increment, supported only on M3/M4 AHB-AP
CSW_DEVICEEN = 0x00000040
CSW_TINPROG = 0x00000080 # Not implemented on M33 AHB5-AP
CSW_ERRNPASS = 0x00010000 # MEM-APv2 only
CSW_ERRSTOP = 0x00020000 # MEM-APv2 only
CSW_SDEVICEEN = 0x00800000 # Also called SPIDEN in ADIv5
CSW_HPROT = 0x0f000000
CSW_MSTRTYPE = 0x20000000 # Only present in M3/M4 AHB-AP, RES0 in others
CSW_MSTRCORE = 0x00000000
CSW_MSTRDBG = 0x20000000
CSW_DBGSWEN = 0x80000000 # Only present in CSSoC-400 APB-AP, RES0 in others
DEFAULT_CSW_VALUE = CSW_SADDRINC
TRANSFER_SIZE = {8: CSW_SIZE8,
16: CSW_SIZE16,
32: CSW_SIZE32,
64: CSW_SIZE64,
128: CSW_SIZE128,
256: CSW_SIZE256,
}
CSW_HPROT_MASK = 0x0f000000 # HPROT[3:0]
CSW_HPROT_SHIFT = 24
CSW_HNONSEC_MASK = 0x40000000
CSW_HNONSEC_SHIFT = 30
# HNONSECURE bits
SECURE = 0
NONSECURE = 1
# HPROT bits
HPROT_DATA = 0x01
HPROT_INSTR = 0x00
HPROT_PRIVILEGED = 0x02
HPROT_USER = 0x00
HPROT_BUFFERABLE = 0x04
HPROT_NONBUFFERABLE = 0x00
HPROT_CACHEABLE = 0x08
HPROT_NONCACHEABLE = 0x00
HPROT_LOOKUP = 0x10
HPROT_NO_LOOKUP = 0x00
HPROT_ALLOCATE = 0x20
HPROT_NO_ALLOCATE = 0x00
HPROT_SHAREABLE = 0x40
HPROT_NONSHAREABLE = 0x00
# Debug Exception and Monitor Control Register
DEMCR = 0xE000EDFC
# DWTENA in armv6 architecture reference manual
DEMCR_TRCENA = (1 << 24)
class APVersion(Enum):
"""@brief Supported versions of APs."""
## APv1 from ADIv5.x.
APv1 = 1
## APv2 from ADIv6.
APv2 = 2
@total_ordering
class APAddressBase:
"""@brief Base class for AP addresses.
An instance of this class has a "nominal address", which is an integer address in terms of how
it is typically referenced. For instance, for an APv1, the nominal address is the unshifted
APSEL, e.g. 0, 1, 2, and so on. This value is accessible by the _nominal_address_ property. It
is also used for hashing and ordering. One intentional side effect of this is that APAddress
instances match against the integer value of their nominal address, which is particularly useful
when they are keys in a dictionary.
In addition to the nominal address, there is an abstract _address_ property implemented by the
version-specific subclasses. This is the value used by the DP hardware and passed to the
DebugPort's read_ap() and write_ap() methods.
AP addresses include the index of the DP to which the AP is connected. On most systems there is only
one DP with index 0.
The class also indicates which version of AP is targeted: either APv1 or APv2. The _ap_version_
property reports this version number, though it is also encoded by the subclass. The AP version
is coupled with the address because the two are intrinsically connected; the version defines the
address format.
"""
def __init__(self, address: int, dp: int = 0) -> None:
"""@brief Constructor accepting the nominal address."""
self._nominal_address = address
self._dp = dp
@property
def ap_version(self) -> APVersion:
"""@brief Version of the AP, as an APVersion enum."""
raise NotImplementedError()
@property
def nominal_address(self) -> int:
"""@brief Integer AP address in the form in which one speaks about it.
This value is used for comparisons and hashing."""
return self._nominal_address
@property
def address(self) -> int:
"""@brief Integer AP address used as a base for register accesses.
This value can be passed to the DebugPort's read_ap() or write_ap() methods. Offsets of
registers can be added to this value to create register addresses."""
raise NotImplementedError()
@property
def idr_address(self) -> int:
"""@brief Address of the IDR register."""
raise NotImplementedError()
@property
def dp_index(self) -> int:
"""@brief Index of the DP to which this AP is attached."""
return self._dp
def __hash__(self) -> int:
return hash(self.nominal_address | (self._dp << 64))
def __eq__(self, other: Any) -> bool:
"""Equality tests against other APAddressBase or subclass instances also compare the DP index.
Supports comparing against raw (int) nominal addresses, in which case the DP index is ignored.
"""
if isinstance(other, APAddressBase):
return (self.nominal_address == other.nominal_address) and (self.dp_index == other.dp_index)
elif isinstance(other, int):
return (self.nominal_address == other)
else:
return False
def __lt__(self, other: Any) -> bool:
"""Ordering tests against other APAddressBase or subclass instances include the DP index, such that
instances with equal nominal addresses but different DP indices will be ordered by DP index.
Supports comparing against raw (int) nominal addresses, in which case the DP index is ignored.
"""
if isinstance(other, APAddressBase):
            return (self.nominal_address, self.dp_index) < (other.nominal_address, other.dp_index)
elif isinstance(other, int):
return (self.nominal_address < other)
else:
return False
def __str__(self) -> str:
raise NotImplementedError()
def __repr__(self) -> str:
return "<{}@{:#x} {} dp={}>".format(self.__class__.__name__, id(self), str(self), self.dp_index)
class APv1Address(APAddressBase):
"""@brief Represents the address for an APv1.
The nominal address is the 8-bit APSEL value. This is written into the top byte of
the DP SELECT register to select the AP to communicate with.
"""
@property
def ap_version(self) -> APVersion:
"""@brief APVersion.APv1."""
return APVersion.APv1
@property
def apsel(self) -> int:
"""@brief Alias for the _nominal_address_ property."""
return self._nominal_address
@property
def address(self) -> int:
return self.apsel << APSEL_SHIFT
@property
def idr_address(self) -> int:
"""@brief Address of the IDR register."""
return AP_IDR
def __str__(self) -> str:
return "#%d" % self.apsel
class APv2Address(APAddressBase):
"""@brief Represents the address for an APv2.
ADIv6 uses an APB bus to communicate with APv2 instances. The nominal address is simply the base
address of the APB slave. The APB bus address width is variable from 12-52 bits in 8-bit steps.
    This address is written to the DP SELECT and, for addresses wider than 32 bits, SELECT1
    registers to choose the AP to communicate with.
"""
@property
def ap_version(self) -> APVersion:
"""@brief Returns APVersion.APv2."""
return APVersion.APv2
@property
def address(self) -> int:
return self._nominal_address
@property
def idr_address(self) -> int:
"""@brief Address of the IDR register."""
return APv2_IDR
def __str__(self) -> str:
return "@0x%x" % self.address
class AccessPort:
"""@brief Base class for a CoreSight Access Port (AP) instance."""
@staticmethod
def probe(dp: "DebugPort", ap_num: int) -> bool:
"""@brief Determine if an AP exists with the given AP number.
Only applicable for ADIv5.
@param dp DebugPort instance.
@param ap_num The AP number (APSEL) to probe.
@return Boolean indicating if a valid AP exists with APSEL=ap_num.
"""
idr = dp.read_ap((ap_num << APSEL_SHIFT) | AP_IDR)
return idr != 0
@staticmethod
def create(
dp: "DebugPort",
ap_address: APAddressBase,
cmpid: Optional["CoreSightComponentID"] = None
) -> "AccessPort":
"""@brief Create a new AP object.
Determines the type of the AP by examining the IDR value and creates a new
AP object of the appropriate class. See #AP_TYPE_MAP for the mapping of IDR
fields to class.
@param dp DebugPort instance.
@param ap_address An instance of either APv1Address or APv2Address.
@return An AccessPort subclass instance.
@exception TargetError Raised if there is not a valid AP for the ap_num.
"""
        # Attempt to read the IDR for this AP address. If we get a zero back then
        # there is no AP present, and a TargetError is raised below.
idr = dp.read_ap(ap_address.address + ap_address.idr_address)
if idr == 0:
raise exceptions.TargetError("Invalid AP address (%s)" % ap_address)
# Extract IDR fields used for lookup.
designer = (idr & AP_IDR_JEP106_MASK) >> AP_IDR_JEP106_SHIFT
apClass = (idr & AP_IDR_CLASS_MASK) >> AP_IDR_CLASS_SHIFT
variant = (idr & AP_IDR_VARIANT_MASK) >> AP_IDR_VARIANT_SHIFT
apType = idr & AP_IDR_TYPE_MASK
# Get the AccessPort class to instantiate.
key = (designer, apClass, variant, apType)
try:
name, klass, flags = AP_TYPE_MAP[key]
except KeyError:
# The AP ID doesn't match, but we can recognize unknown MEM-APs.
if (apClass == AP_CLASS_MEM_AP) and (designer == AP_JEP106_ARM):
name = "MEM-AP"
klass = MEM_AP
else:
name = None
klass = AccessPort
flags = 0
ap = klass(dp, ap_address, idr, name, flags, cmpid)
ap.init()
return ap
def __init__(
self,
dp: "DebugPort",
ap_address: APAddressBase,
idr: Optional[int] = None,
name: Optional[str] = None,
flags: int = 0,
cmpid: Optional["CoreSightComponentID"] = None
) -> None:
"""@brief AP constructor.
@param self
@param dp The DebugPort object.
@param ap_address APAddress object with address of this AP.
@param idr This AP's IDR register value. If not provided, the IDR will be read by init().
@param name Name for the AP type, such as "AHB5-AP". If not provided, the type name will be
set to "AP".
@param flags Bit mask with extra information about this AP.
"""
self.dp = dp
self.address = ap_address
self._ap_version = ap_address.ap_version
self.idr = idr
self.variant = 0
self.revision = 0
self.ap_class = 0
self.ap_type = 0
self.type_name = name or "AP"
self.rom_addr = 0
self.has_rom_table = False
self.rom_table = None
self.core: Optional["CoreTarget"] = None
self._flags = flags
self._cmpid = cmpid
@property
def description(self):
""" @brief The AP's type and version description.
If the AP is an unknown proprietary type, then only the string "proprietary" is returned.
        This property should only be read after init() has been called.
"""
if self.type_name is not None:
return f"{self.type_name} var{self.variant} rev{self.revision}"
else:
return "proprietary"
@property
def short_description(self) -> str:
""" @brief The AP's name and address."""
return self.type_name + str(self.address)
@property
def ap_version(self) -> APVersion:
"""@brief The AP's major version determined by ADI version.
@retval APVersion.APv1
@retval APVersion.APv2
"""
return self._ap_version
@locked
def init(self) -> None:
# Read IDR if it wasn't given to us in the ctor.
if self.idr is None:
self.idr = self.read_reg(self.address.idr_address)
self.variant = (self.idr & AP_IDR_VARIANT_MASK) >> AP_IDR_VARIANT_SHIFT
self.revision = (self.idr & AP_IDR_REVISION_MASK) >> AP_IDR_REVISION_SHIFT
# Get the type name for this AP.
self.ap_class = (self.idr & AP_IDR_CLASS_MASK) >> AP_IDR_CLASS_SHIFT
self.ap_type = self.idr & AP_IDR_TYPE_MASK
def find_components(self) -> None:
"""@brief Find CoreSight components attached to this AP."""
pass
@overload
def read_reg(self, addr: int) -> int:
...
@overload
def read_reg(self, addr: int, now: Literal[True] = True) -> int:
...
@overload
def read_reg(self, addr: int, now: Literal[False]) -> Callable[[], int]:
...
@overload
def read_reg(self, addr: int, now: bool) -> Union[int, Callable[[], int]]:
...
@locked
def read_reg(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]:
return self.dp.read_ap(self.address.address + addr, now)
@locked
def write_reg(self, addr: int, data: int) -> None:
self.dp.write_ap(self.address.address + addr, data)
def lock(self) -> None:
"""@brief Lock the AP from access by other threads."""
self.dp.probe.lock()
def unlock(self) -> None:
"""@brief Unlock the AP."""
self.dp.probe.unlock()
@contextmanager
def locked(self) -> Generator[None, None, None]:
"""@brief Context manager for locking the AP using a with statement.
All public methods of AccessPort and its subclasses are automatically locked, so manual
locking usually is not necessary unless you need to hold the lock across multiple AP
accesses.
"""
self.lock()
yield
self.unlock()
def __repr__(self) -> str:
return "<{}@{:x} {} idr={:08x} rom={:08x}>".format(
self.__class__.__name__, id(self), self.short_description, self.idr, self.rom_addr)
class MEM_AP(AccessPort, memory_interface.MemoryInterface):
"""@brief MEM-AP component.
This class supports MEM-AP v1 and v2.
The bits of HPROT have the following meaning. Not all bits are implemented in all
MEM-APs. AHB-Lite only implements HPROT[3:0].
HPROT[0] = 1 data access, 0 instr fetch<br/>
    HPROT[1] = 1 privileged, 0 user<br/>
HPROT[2] = 1 bufferable, 0 non bufferable<br/>
    HPROT[3] = 1 cacheable/modifiable, 0 non cacheable<br/>
HPROT[4] = 1 lookupincache, 0 no cache<br/>
HPROT[5] = 1 allocate in cache, 0 no allocate in cache<br/>
HPROT[6] = 1 shareable, 0 non shareable<br/>
Extensions not supported:
- Large Data Extension
- Large Physical Address Extension
- Barrier Operation Extension
"""
def __init__(
self,
dp: "DebugPort",
ap_address: APAddressBase,
idr: Optional[int] = None,
name: Optional[str] = None,
flags: int = 0,
cmpid: Optional["CoreSightComponentID"] = None
) -> None:
super().__init__(dp, ap_address, idr, name, flags, cmpid)
# Check AP version and set the offset to the control and status registers.
if self.ap_version == APVersion.APv1:
self._reg_offset = 0
elif self.ap_version == APVersion.APv2:
self._reg_offset = MEM_APv2_CONTROL_REG_OFFSET
else:
assert False, "Unrecognized AP version %s" % self.ap_version
self._impl_hprot: int = 0
self._impl_hnonsec: int = 0
## Default HPROT value for CSW.
self._hprot: int = HPROT_DATA | HPROT_PRIVILEGED
## Default HNONSEC value for CSW.
self._hnonsec: int = SECURE
## Base CSW value to use.
self._csw: int = DEFAULT_CSW_VALUE
# Certain MEM-APs support a DBGSWEN control in the AP's CSW register. When set to zero, software running
# on the device is prevented from accessing the memory space downstream from the MEM-AP. This feature is
# deprecated in ADIv6, and Arm recommends to never clear the bit when implemented.
if self._flags & AP_DBGSWEN:
self._csw |= CSW_DBGSWEN
## Cached current CSW value.
self._cached_csw: int = -1
## Supported transfer sizes.
self._transfer_sizes: Set[int] = {32}
## Auto-increment wrap modulus.
#
# The AP_4K_WRAP flag indicates a 4 kB wrap size. Otherwise it defaults to the smallest
# size supported by all targets. A size smaller than the supported size will decrease
# performance due to the extra address writes, but will not create any read/write errors.
self.auto_increment_page_size: int = 0x1000 if (self._flags & AP_4K_WRAP) else 0x400
## Number of DAR registers.
self._dar_count: int = 0
## Mask of addresses. This indicates whether 32-bit or 64-bit addresses are supported.
self._address_mask: int = 0xffffffff
## Whether the Large Data extension is supported.
self._has_large_data: bool = False
# Ask the probe for an accelerated memory interface for this AP. If it provides one,
# then bind our memory interface APIs to its methods. Otherwise use our standard
# memory interface based on AP register accesses.
self._accelerated_memory_interface = self.dp.probe.get_memory_interface_for_ap(self.address)
if self._accelerated_memory_interface is not None:
LOG.debug("Using accelerated memory access interface for %s", self.short_description)
self.write_memory = self._accelerated_write_memory
self.read_memory = self._accelerated_read_memory
self.write_memory_block32 = self._accelerated_write_memory_block32
self.read_memory_block32 = self._accelerated_read_memory_block32
self.write_memory_block8 = self._accelerated_write_memory_block8
self.read_memory_block8 = self._accelerated_read_memory_block8
else:
self.write_memory = self._write_memory
self.read_memory = self._read_memory
self.write_memory_block32 = self._write_memory_block32
self.read_memory_block32 = self._read_memory_block32
# Subscribe to reset events.
self.dp.session.subscribe(self._reset_did_occur, (Target.Event.PRE_RESET, Target.Event.POST_RESET))
@property
def supported_transfer_sizes(self) -> Set[int]:
"""@brief Tuple of transfer sizes supported by this AP."""
return self._transfer_sizes
@property
def is_enabled(self) -> bool:
"""@brief Whether any memory transfers are allowed by this AP.
Memory transfers may be disabled by an input signal to the AP. This is often done when debug security
is enabled on the device, to disallow debugger access to internal memory.
"""
return self.is_enabled_for(Target.SecurityState.NONSECURE)
def is_enabled_for(self, security_state: Target.SecurityState) -> bool:
"""@brief Checks whether memory transfers are allowed by this AP for the given security state.
Memory transfers may be disabled by an input signal to the AP. This is often done when debug security
is enabled on the device, to disallow debugger access to internal memory.
@param self The AP instance.
@param security_state One of the @ref pyocd.core.target.Target.SecurityState "SecurityState" enums.
@return Boolean indicating whether memory transfers can be performed in the requested security state. You
may change the security state used for transfers with the hnonsec property and hnonsec_lock() method.
"""
assert isinstance(security_state, Target.SecurityState)
# Call to superclass to read CSW. We want to bypass our CSW cache since the enable signal can change
# asynchronously.
csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW)
if security_state is Target.SecurityState.NONSECURE:
# Nonsecure transfers are always allowed when security transfers are enabled.
return (csw & (CSW_DEVICEEN | CSW_SDEVICEEN)) != 0
elif security_state is Target.SecurityState.SECURE:
return (csw & CSW_SDEVICEEN) != 0
else:
assert False, "unsupported security state"
@locked
def init(self) -> None:
"""@brief Initialize the MEM-AP.
This method interrogates the MEM-AP to determine its capabilities, and performs any initial setup
that is required.
It performs these checks:
- Check for Long Address extension.
- Check for Large Data extension.
- (v2 only) Get the auto-increment page size.
- (v2 only) Determine supported error mode.
- (v2 only) Get the size of the DAR register window.
- Determine supported transfer sizes.
- Determine the implemented HPROT and HNONSEC controls.
- Read the ROM table base address.
These controls are configured.
- (v2 only) Configure the error mode.
"""
super().init()
# Read initial CSW. Superclass register access methods are used to avoid the CSW cache.
original_csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW)
def _init_cfg() -> None:
"""@brief Read MEM-AP CFG register."""
cfg = self.read_reg(self._reg_offset + MEM_AP_CFG)
# Check for 64-bit address support.
if cfg & MEM_AP_CFG_LA_MASK:
self._address_mask = 0xffffffffffffffff
# Check for Large Data extension.
if cfg & MEM_AP_CFG_LD_MASK:
self._has_large_data = True
# Check v2 MEM-AP CFG fields.
if self.ap_version == APVersion.APv2:
# Set autoinc page size if TARINC is non-zero. Otherwise we've already set the
# default of 1 kB in the ctor.
tarinc = (cfg & MEM_AP_CFG_TARINC_MASK) >> MEM_AP_CFG_TARINC_SHIFT
if tarinc != 0:
self.auto_increment_page_size = 1 << (9 + tarinc)
# Determine supported err mode.
err = (cfg & MEM_AP_CFG_ERR_MASK) >> MEM_AP_CFG_ERR_SHIFT
if err == MEM_AP_CFG_ERR_V1:
# Configure the error mode such that errors are passed upstream, but they don't
# prevent future transactions.
self._csw &= ~(CSW_ERRSTOP | CSW_ERRNPASS)
# Clear TRR in case we attach to a device with a sticky error already set.
self.write_reg(self._reg_offset + MEM_AP_TRR, MEM_AP_TRR_ERR_MASK)
# Init size of DAR register window.
darsize = (cfg & MEM_AP_CFG_DARSIZE_MASK) >> MEM_AP_CFG_DARSIZE_SHIFT
self._dar_count = (1 << darsize) // 4
def _init_transfer_sizes() -> None:
"""@brief Determine supported transfer sizes.
If the #AP_ALL_TX_SZ flag is set, then we know a priori that this AP implementation
            supports 8-, 16-, and 32-bit transfer sizes. If the Large Data extension is implemented, then this
flag is ignored.
Note in ADIv6: "If a MEM-AP implementation does not support the Large Data Extension, but does
support various access sizes, it must support word, halfword, and byte accesses."
So, if the Large Data extension is present, then we have to individually test each
transfer size (aside from the required 32-bit).
If Large Data is not present, then only one non-32-bit transfer size needs to be tested to
determine if the AP supports both 8- and 16-bit transfers in addition to the required 32-bit.
"""
# If AP_ALL_TX_SZ is set, we can skip the test. Double check this by ensuring that LD is not
# enabled.
if (self._flags & AP_ALL_TX_SZ) and not self._has_large_data:
self._transfer_sizes = {8, 16, 32}
return
def _test_transfer_size(sz):
"""@brief Utility to verify whether the MEM-AP supports a given transfer size.
From ADIv6:
If the CSW.Size field is written with a value corresponding to a size that is not supported,
or with a reserved value: A read of the field returns a value corresponding to a supported
size.
"""
# Write CSW_SIZE to select requested transfer size.
AccessPort.write_reg(self, self._reg_offset + MEM_AP_CSW, original_csw & ~CSW_SIZE | sz)
# Read back CSW and see if SIZE matches what we wrote.
csw_cb = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW, now=False)
return lambda: (csw_cb() & CSW_SIZE) == sz
# Thus if LD ext is not present, we only need to test one size.
if self._has_large_data:
# Need to scan all sizes except 32-bit, which is required.
SIZES_TO_TEST = (CSW_SIZE8, CSW_SIZE16, CSW_SIZE64, CSW_SIZE128, CSW_SIZE256)
sz_result_cbs = ((sz, _test_transfer_size(sz)) for sz in SIZES_TO_TEST)
self._transfer_sizes = {32} | {(8 * (1 << sz)) for sz, cb in sz_result_cbs if cb()}
elif _test_transfer_size(CSW_SIZE16)():
self._transfer_sizes = {8, 16, 32}
def _init_hprot() -> None:
"""@brief Init HPROT HNONSEC.
Determines the implemented bits of HPROT and HNONSEC in this MEM-AP. The defaults for these
fields of the CSW are based on the implemented bits.
"""
default_hprot = (original_csw & CSW_HPROT_MASK) >> CSW_HPROT_SHIFT
default_hnonsec = (original_csw & CSW_HNONSEC_MASK) >> CSW_HNONSEC_SHIFT
LOG.debug("%s default HPROT=%x HNONSEC=%x", self.short_description, default_hprot, default_hnonsec)
# Now attempt to see which HPROT and HNONSEC bits are implemented.
AccessPort.write_reg(self, self._reg_offset + MEM_AP_CSW,
original_csw | CSW_HNONSEC_MASK | CSW_HPROT_MASK)
csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW)
self._impl_hprot = (csw & CSW_HPROT_MASK) >> CSW_HPROT_SHIFT
self._impl_hnonsec = (csw & CSW_HNONSEC_MASK) >> CSW_HNONSEC_SHIFT
LOG.debug("%s implemented HPROT=%x HNONSEC=%x", self.short_description, self._impl_hprot,
self._impl_hnonsec)
# Update current HPROT and HNONSEC, and the current base CSW value.
self.hprot = self._hprot & self._impl_hprot
self.hnonsec = self._hnonsec & self._impl_hnonsec
def _init_rom_table_base() -> None:
"""@brief Read ROM table base address."""
base = self.read_reg(self._reg_offset + MEM_AP_BASE)
is_adiv5_base = (base & AP_BASE_FORMAT_MASK) != 0
is_base_present = (base & AP_BASE_ENTRY_PRESENT_MASK) != 0
is_legacy_base_present = not is_adiv5_base and not is_base_present
if is_legacy_base_present:
self.has_rom_table = True
self.rom_addr = base & AP_BASE_LEGACY_BASEADDR_MASK # clear format and present bits
elif (base == AP_BASE_LEGACY_NOTPRESENT) or (not is_base_present):
self.has_rom_table = False
self.rom_addr = 0
elif is_adiv5_base and is_base_present:
self.has_rom_table = True
self.rom_addr = base & AP_BASE_BASEADDR_MASK # clear format and present bits
else:
raise exceptions.TargetError("invalid AP BASE value 0x%08x" % base)
# Run the init tests.
_init_cfg()
_init_transfer_sizes()
_init_hprot()
_init_rom_table_base()
# Restore unmodified value of CSW.
AccessPort.write_reg(self, self._reg_offset + MEM_AP_CSW, original_csw)
@locked
def find_components(self) -> None:
try:
if self.has_rom_table:
if not self.is_enabled:
LOG.warning("Skipping CoreSight discovery for %s because it is disabled", self.short_description)
return
# Import locally to work around circular import.
from .rom_table import (CoreSightComponentID, ROMTable)
# Read the ROM table component IDs.
cmpid = CoreSightComponentID(None, self, self.rom_addr)
cmpid.read_id_registers()
# Instantiate the ROM table and parse it.
if cmpid.is_rom_table:
self.rom_table = ROMTable.create(self, cmpid, self.rom_addr)
self.rom_table.init()
except exceptions.TransferError as error:
LOG.error("Transfer error while reading %s ROM table: %s", self.short_description, error,
exc_info=self.dp.session.log_tracebacks)
@property
def implemented_hprot_mask(self) -> int:
return self._impl_hprot
@property
def implemented_hnonsec_mask(self) -> int:
return self._impl_hnonsec
@property
def hprot(self) -> int:
return self._hprot
@hprot.setter
@locked
def hprot(self, value: int) -> None:
"""@brief Setter for current HPROT value used for memory transactions.
The bits of HPROT have the following meaning. Not all bits are implemented in all
MEM-APs. AHB-Lite only implements HPROT[3:0].
HPROT[0] = 1 data access, 0 instr fetch<br/>
        HPROT[1] = 1 privileged, 0 user<br/>
HPROT[2] = 1 bufferable, 0 non bufferable<br/>
        HPROT[3] = 1 cacheable/modifiable, 0 non cacheable<br/>
HPROT[4] = 1 lookup in cache, 0 no cache<br/>
HPROT[5] = 1 allocate in cache, 0 no allocate in cache<br/>
HPROT[6] = 1 shareable, 0 non shareable<br/>
"""
self._hprot = value & (CSW_HPROT_MASK >> CSW_HPROT_SHIFT)
self._csw = ((self._csw & ~CSW_HPROT_MASK)
| (self._hprot << CSW_HPROT_SHIFT))
@property
def hnonsec(self) -> int:
return self._hnonsec
@hnonsec.setter
@locked
def hnonsec(self, value: int) -> None:
"""@brief Setter for current HNONSEC value used for memory transactions.
Not all MEM-APs support control of HNONSEC. In particular, only the AHB5-AP used for
v8-M Cortex-M systems does. The AXI-AP for Cortex-A systems also allows this control.
@param value 0 is secure, 1 is non-secure.
"""
self._hnonsec = value & (CSW_HNONSEC_MASK >> CSW_HNONSEC_SHIFT)
self._csw = ((self._csw & ~CSW_HNONSEC_MASK)
| (self._hnonsec << CSW_HNONSEC_SHIFT))
class _MemAttrContext:
"""@brief Context manager for temporarily setting HPROT and/or HNONSEC.
The AP is locked during the lifetime of the context manager. This means that only the
calling thread can perform memory transactions.
"""
def __init__(self, ap: "MEM_AP", hprot: Optional[int] = None, hnonsec: Optional[int] = None):
self._ap = ap
self._hprot = hprot
self._saved_hprot = None
self._hnonsec = hnonsec
self._saved_hnonsec = None
def __enter__(self) -> "MEM_AP._MemAttrContext":
self._ap.lock()
if self._hprot is not None:
self._saved_hprot = self._ap.hprot
self._ap.hprot = self._hprot
if self._hnonsec is not None:
self._saved_hnonsec = self._ap.hnonsec
self._ap.hnonsec = self._hnonsec
return self
def __exit__(self, exc_type: type, value: Any, traceback: "TracebackType") -> None:
if self._saved_hprot is not None:
self._ap.hprot = self._saved_hprot
if self._saved_hnonsec is not None:
self._ap.hnonsec = self._saved_hnonsec
self._ap.unlock()
def hprot_lock(self, hprot: int) -> _MemAttrContext:
"""@brief Context manager to temporarily change HPROT."""
return self._MemAttrContext(self, hprot=hprot)
def hnonsec_lock(self, hnonsec: int) -> _MemAttrContext:
"""@brief Context manager to temporarily change HNONSEC.
@see secure_lock(), nonsecure_lock()
"""
return self._MemAttrContext(self, hnonsec=hnonsec)
def secure_lock(self) -> _MemAttrContext:
"""@brief Context manager to temporarily set the AP to use secure memory transfers."""
return self.hnonsec_lock(SECURE)
def nonsecure_lock(self) -> _MemAttrContext:
"""@brief Context manager to temporarily set AP to use non-secure memory transfers."""
return self.hnonsec_lock(NONSECURE)
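    # Hedged usage sketch (`ap` is a MEM_AP instance; read32() comes from the
    # inherited MemoryInterface): perform one secure read while holding the AP
    # lock, restoring the previous HNONSEC value afterwards:
    #
    #     with ap.secure_lock():
    #         value = ap.read32(0x20000000)  # example address, not significant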
@overload
def read_reg(self, addr: int) -> int:
...
@overload
def read_reg(self, addr: int, now: Literal[True] = True) -> int:
...
@overload
def read_reg(self, addr: int, now: Literal[False]) -> Callable[[], int]:
...
@overload
def read_reg(self, addr: int, now: bool) -> Union[int, Callable[[], int]]:
...
@locked
def read_reg(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]:
ap_regaddr = addr & APREG_MASK
if ap_regaddr == self._reg_offset + MEM_AP_CSW and self._cached_csw != -1 and now:
return self._cached_csw
return self.dp.read_ap(self.address.address + addr, now)
@locked
def write_reg(self, addr: int, data: int) -> None:
ap_regaddr = addr & APREG_MASK
# Don't need to write CSW if it's not changing value.
if ap_regaddr == self._reg_offset + MEM_AP_CSW:
if data == self._cached_csw:
if TRACE.isEnabledFor(logging.INFO):
num = self.dp.next_access_number
TRACE.debug("write_ap:%06d cached (ap=0x%x; addr=0x%08x) = 0x%08x",
num, self.address.nominal_address, addr, data)
return
self._cached_csw = data
try:
self.dp.write_ap(self.address.address + addr, data)
except exceptions.ProbeError:
# Invalidate cached CSW on exception.
if ap_regaddr == self._reg_offset + MEM_AP_CSW:
self._invalidate_cache()
raise
def _invalidate_cache(self) -> None:
"""@brief Invalidate cached registers associated with this AP."""
self._cached_csw = -1
def _reset_did_occur(self, notification: "Notification") -> None:
"""@brief Handles reset notifications to invalidate CSW cache."""
# We clear the cache on all resets just to be safe.
self._invalidate_cache()
@locked
def _write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None:
"""@brief Write a single memory location.
By default the transfer size is a word
@exception TransferError Raised if the requested transfer size is not supported by the AP.
"""
assert (addr & (transfer_size // 8 - 1)) == 0
addr &= self._address_mask
if transfer_size not in self._transfer_sizes:
raise exceptions.TransferError("%d-bit transfers are not supported by %s"
% (transfer_size, self.short_description))
num = self.dp.next_access_number
TRACE.debug("write_mem:%06d (ap=0x%x; addr=0x%08x, size=%d) = 0x%08x {",
num, self.address.nominal_address, addr, transfer_size, data)
self.write_reg(self._reg_offset + MEM_AP_CSW, self._csw | TRANSFER_SIZE[transfer_size])
try:
self.write_reg(self._reg_offset + MEM_AP_TAR, addr)
if transfer_size <= 32:
if transfer_size == 8:
data = data << ((addr & 0x03) << 3)
elif transfer_size == 16:
data = data << ((addr & 0x02) << 3)
self.write_reg(self._reg_offset + MEM_AP_DRW, data)
else:
# Split the value into a tuple of 32-bit words, least-significant first.
data_words = list(((data >> (32 * i)) & 0xffffffff) for i in range(transfer_size // 32))
# Multi-word transfer.
self.dp.write_ap_multiple(self.address.address + self._reg_offset + MEM_AP_DRW, data_words)
except exceptions.TransferFaultError as error:
# Annotate error with target address.
self._handle_error(error, num)
error.fault_address = addr
error.fault_length = transfer_size // 8
raise
except exceptions.Error as error:
self._handle_error(error, num)
raise
TRACE.debug("write_mem:%06d }", num)
@overload
def _read_memory(self, addr: int, transfer_size: int = 32) -> int:
...
@overload
def _read_memory(self, addr: int, transfer_size: int = 32, now: Literal[True] = True) -> int:
...
@overload
def _read_memory(self, addr: int, transfer_size: int, now: Literal[False]) -> Callable[[], int]:
...
@overload
def _read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Callable[[], int]]:
...
@locked
def _read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]:
"""@brief Read a memory location.
By default, a word will be read.
@exception TransferError Raised if the requested transfer size is not supported by the AP.
"""
assert (addr & (transfer_size // 8 - 1)) == 0
addr &= self._address_mask
if transfer_size not in self._transfer_sizes:
raise exceptions.TransferError("%d-bit transfers are not supported by %s"
% (transfer_size, self.short_description))
num = self.dp.next_access_number
TRACE.debug("read_mem:%06d (ap=0x%x; addr=0x%08x, size=%d) {",
num, self.address.nominal_address, addr, transfer_size)
try:
self.write_reg(self._reg_offset + MEM_AP_CSW, self._csw | TRANSFER_SIZE[transfer_size])
self.write_reg(self._reg_offset + MEM_AP_TAR, addr)
if transfer_size <= 32:
result_cb = self.read_reg(self._reg_offset + MEM_AP_DRW, now=False)
else:
# Multi-word transfer.
result_cb_mw = self.dp.read_ap_multiple(self.address.address + self._reg_offset + MEM_AP_DRW,
transfer_size // 32, now=False)
except exceptions.TransferFaultError as error:
# Annotate error with target address.
self._handle_error(error, num)
error.fault_address = addr
error.fault_length = transfer_size // 8
raise
except exceptions.Error as error:
self._handle_error(error, num)
raise
def read_mem_cb() -> int:
try:
if transfer_size <= 32:
res = result_cb()
if transfer_size == 8:
res = (res >> ((addr & 0x03) << 3) & 0xff)
elif transfer_size == 16:
res = (res >> ((addr & 0x02) << 3) & 0xffff)
else:
res_mw = result_cb_mw()
res = sum((w << (32 * i)) for i, w in enumerate(res_mw))
TRACE.debug("read_mem:%06d %s(ap=0x%x; addr=0x%08x, size=%d) -> 0x%08x }",
num, "" if now else "...", self.address.nominal_address, addr, transfer_size, res)
return res
except exceptions.TransferFaultError as error:
# Annotate error with target address.
self._handle_error(error, num)
error.fault_address = addr
error.fault_length = transfer_size // 8
raise
except exceptions.Error as error:
self._handle_error(error, num)
raise
if now:
result = read_mem_cb()
return result
else:
return read_mem_cb
def _write_block32_page(self, addr: int, data: Sequence[int]) -> None:
"""@brief Write a single transaction's worth of aligned words.
The transaction must not cross the MEM-AP's auto-increment boundary.
This method is not locked because it is only called by _write_memory_block32(), which is locked.
"""
assert (addr & 0x3) == 0
num = self.dp.next_access_number
TRACE.debug("_write_block32:%06d (ap=0x%x; addr=0x%08x, size=%d) {",
num, self.address.nominal_address, addr, len(data))
# put address in TAR
self.write_reg(self._reg_offset + MEM_AP_CSW, self._csw | CSW_SIZE32)
self.write_reg(self._reg_offset + MEM_AP_TAR, addr)
try:
self.dp.write_ap_multiple(self.address.address + self._reg_offset + MEM_AP_DRW, data)
except exceptions.TransferFaultError as error:
# Annotate error with target address.
self._handle_error(error, num)
error.fault_address = addr
error.fault_length = len(data) * 4
raise
except exceptions.Error as error:
self._handle_error(error, num)
raise
TRACE.debug("_write_block32:%06d }", num)
def _read_block32_page(self, addr: int, size: int) -> Sequence[int]:
"""@brief Read a single transaction's worth of aligned words.
The transaction must not cross the MEM-AP's auto-increment boundary.
This method is not locked because it is only called by _read_memory_block32(), which is locked.
"""
assert (addr & 0x3) == 0
num = self.dp.next_access_number
TRACE.debug("_read_block32:%06d (ap=0x%x; addr=0x%08x, size=%d) {",
num, self.address.nominal_address, addr, size)
# put address in TAR
self.write_reg(self._reg_offset + MEM_AP_CSW, self._csw | CSW_SIZE32)
self.write_reg(self._reg_offset + MEM_AP_TAR, addr)
try:
resp = self.dp.read_ap_multiple(self.address.address + self._reg_offset + MEM_AP_DRW, size)
except exceptions.TransferFaultError as error:
# Annotate error with target address.
self._handle_error(error, num)
error.fault_address = addr
error.fault_length = size * 4
raise
except exceptions.Error as error:
self._handle_error(error, num)
raise
TRACE.debug("_read_block32:%06d }", num)
return resp
@locked
def _write_memory_block32(self, addr: int, data: Sequence[int]) -> None:
"""@brief Write a block of aligned words in memory."""
assert (addr & 0x3) == 0
addr &= self._address_mask
size = len(data)
while size > 0:
n = self.auto_increment_page_size - (addr & (self.auto_increment_page_size - 1))
if size*4 < n:
n = (size*4) & 0xfffffffc
self._write_block32_page(addr, data[:n//4])
data = data[n//4:]
size -= n//4
addr += n
return
@locked
def _read_memory_block32(self, addr: int, size: int) -> Sequence[int]:
"""@brief Read a block of aligned words in memory.
@return A list of word values.
"""
assert (addr & 0x3) == 0
addr &= self._address_mask
resp = []
while size > 0:
n = self.auto_increment_page_size - (addr & (self.auto_increment_page_size - 1))
if size*4 < n:
n = (size*4) & 0xfffffffc
resp += self._read_block32_page(addr, n//4)
size -= n//4
addr += n
return resp
@locked
def _accelerated_write_memory(self, addr: int, data: int, transfer_size: int=32) -> None:
"""@brief Write one memory location using the probe's accelerated memory interface.
        The current CSW value is passed to the accelerated interface, primarily for STLink.
"""
assert self._accelerated_memory_interface is not None
self._accelerated_memory_interface.write_memory(addr, data, transfer_size,
csw=self._csw)
@locked
def _accelerated_read_memory(self, addr: int, transfer_size: int=32, now: bool=True) \
-> Union[int, Callable[[], int]]:
"""@brief Read one memory location using the probe's accelerated memory interface.
        The current CSW value is passed to the accelerated interface, primarily for STLink.
"""
assert self._accelerated_memory_interface is not None
return self._accelerated_memory_interface.read_memory(addr, transfer_size, now,
csw=self._csw)
@locked
def _accelerated_write_memory_block32(self, addr: int, data: Sequence[int]) -> None:
"""@brief Write a memory block using the probe's accelerated memory interface.
        The current CSW value is passed to the accelerated interface, primarily for STLink.
"""
assert self._accelerated_memory_interface is not None
self._accelerated_memory_interface.write_memory_block32(addr, data,
csw=self._csw)
@locked
def _accelerated_read_memory_block32(self, addr: int, size: int) -> Sequence[int]:
"""@brief Read a memory block using the probe's accelerated memory interface.
        The current CSW value is passed to the accelerated interface, primarily for STLink.
"""
assert self._accelerated_memory_interface is not None
return self._accelerated_memory_interface.read_memory_block32(addr, size,
csw=self._csw)
@locked
def _accelerated_write_memory_block8(self, addr: int, data: Sequence[int]) -> None:
"""@brief Write a memory block using the probe's accelerated memory interface.
        The current CSW value is passed to the accelerated interface, primarily for STLink.
"""
assert self._accelerated_memory_interface is not None
self._accelerated_memory_interface.write_memory_block8(addr, data,
csw=self._csw)
@locked
def _accelerated_read_memory_block8(self, addr: int, size: int) -> Sequence[int]:
"""@brief Read a memory block using the probe's accelerated memory interface.
        The current CSW value is passed to the accelerated interface, primarily for STLink.
"""
assert self._accelerated_memory_interface is not None
return self._accelerated_memory_interface.read_memory_block8(addr, size,
csw=self._csw)
def _handle_error(self, error: Exception, num: int) -> None:
self.dp._handle_error(error, num)
self._invalidate_cache()
class AHB_AP(MEM_AP):
"""@brief AHB-AP access port subclass.
This subclass checks for the AP_MSTRTYPE flag, and if set configures that field in the CSW
    register to use debugger transactions. Only the M3 and M4 AHB-APs implement MSTRTYPE.
Another AHB-AP specific addition is that an attempt is made to set the TRCENA bit in the DEMCR
register before reading the ROM table. This is required on some Cortex-M devices, otherwise
certain ROM table entries will read as zeroes or other garbage.
"""
@locked
def init(self) -> None:
super().init()
# Check for and enable the Master Type bit on AHB-APs where it might be implemented.
if self._flags & AP_MSTRTYPE:
self._init_mstrtype()
def _init_mstrtype(self) -> None:
"""@brief Set master type control in CSW.
Only the v1 AHB-AP from Cortex-M3 and Cortex-M4 implements the MSTRTYPE flag to control
whether transactions appear as debugger or internal accesses.
"""
# Set the master type to "debugger" for AP's that support this field.
self._csw |= CSW_MSTRDBG
def find_components(self) -> None:
# Turn on DEMCR.TRCENA before reading the ROM table. Some ROM table entries can
# come back as garbage if TRCENA is not set.
try:
demcr = self.read32(DEMCR)
self.write32(DEMCR, demcr | DEMCR_TRCENA)
self.dp.flush()
except exceptions.TransferError:
# Ignore exception and read whatever we can of the ROM table.
pass
# Invoke superclass.
super().find_components()
## @brief Arm JEP106 code
#
# - [6:0] = 0x3B, Arm's JEP106 identification code
# - [12:7] = 4, the number of JEP106 continuation codes for Arm
AP_JEP106_ARM = 0x23b
## @brief Arm China JEP106 code
#
# - [6:0] = 0x75, JEP106 identification code
# - [12:7] = 10, number of JEP106 continuation codes
AP_JEP106_ARM_CHINA = 0x575
# AP classes
AP_CLASS_JTAG_AP = 0x0
AP_CLASS_COM_AP = 0x1 # SDC-600 (Chaucer)
AP_CLASS_MEM_AP = 0x8 # AHB-AP, APB-AP, AXI-AP
# MEM-AP type constants
AP_TYPE_AHB = 0x1
AP_TYPE_APB = 0x2
AP_TYPE_AXI = 0x4
AP_TYPE_AHB5 = 0x5
AP_TYPE_APB4 = 0x6
AP_TYPE_AXI5 = 0x7
AP_TYPE_AHB5_HPROT = 0x8
# AP flags.
AP_4K_WRAP = 0x1 # The AP has a 4 kB auto-increment modulus.
AP_ALL_TX_SZ = 0x2 # The AP is known to support 8-, 16-, and 32-bit transfers, *unless* Large Data is implemented.
AP_MSTRTYPE = 0x4 # The AP is known to support the MSTRTYPE field.
AP_DBGSWEN = 0x8 # The AP is known to support the DBGSWEN flag.
## Map from AP IDR fields to AccessPort subclass.
#
# The dict maps from a 4-tuple of (JEP106 code, AP class, variant, type) to 3-tuple (name, class, flags).
#
# Known AP IDRs:
# 0x24770011 AHB-AP with 0x1000 wrap and MSTRTYPE
# Used on m4 & m3 - Documented in arm_cortexm4_processor_trm_100166_0001_00_en.pdf
# and arm_cortexm3_processor_trm_100165_0201_00_en.pdf
# 0x34770001 AHB-AP Documented in DDI0314H_coresight_components_trm.pdf
# 0x44770001 AHB-AP Used on m1 - Documented in DDI0413D_cortexm1_r1p0_trm.pdf
# 0x04770031 AHB-AP Used on m0+? at least on KL25Z, KL46, LPC812
# 0x04770021 AHB-AP Used on m0? used on nrf51, lpc11u24
# 0x04770041 AHB-AP Used on m7, RT1050
# 0x64770001 AHB-AP Used on m7, documented in DDI0480G_coresight_soc_trm.pdf
# 0x74770001 AHB-AP Used on m0+ on KL28Z
# 0x84770001 AHB-AP Used on K32W042
# 0x14770005 AHB5-AP Used on M33. Note that M33 r0p0 incorrectly fails to report this IDR.
# 0x04770025 AHB5-AP Used on M23.
# 0x54770002 APB-AP used on STM32H743, from CSSoC-400
# 0x34770017 AXI5-AP from Corstone-700
AP_TYPE_MAP: Dict[Tuple[int, int, int, int], Tuple[str, Type[AccessPort], int]] = {
# |JEP106 |Class |Var|Type |Name |Class
(AP_JEP106_ARM, AP_CLASS_JTAG_AP, 0, 0): ("JTAG-AP", AccessPort, 0 ),
(AP_JEP106_ARM, AP_CLASS_COM_AP, 0, 0): ("SDC-600", AccessPort, 0 ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AHB): ("AHB-AP", AHB_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 1, AP_TYPE_AHB): ("AHB-AP", AHB_AP, AP_ALL_TX_SZ|AP_4K_WRAP|AP_MSTRTYPE ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 2, AP_TYPE_AHB): ("AHB-AP", AHB_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 3, AP_TYPE_AHB): ("AHB-AP", AHB_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 4, AP_TYPE_AHB): ("AHB-AP", AHB_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_APB): ("APB-AP", MEM_AP, AP_DBGSWEN ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AXI): ("AXI-AP", MEM_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AHB5): ("AHB5-AP", AHB_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 1, AP_TYPE_AHB5): ("AHB5-AP", AHB_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 2, AP_TYPE_AHB5): ("AHB5-AP", AHB_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_APB4): ("APB4-AP", MEM_AP, 0 ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AXI5): ("AXI5-AP", MEM_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 1, AP_TYPE_AXI5): ("AXI5-AP", MEM_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM, AP_CLASS_MEM_AP, 0, AP_TYPE_AHB5_HPROT): ("AHB5-AP", MEM_AP, AP_ALL_TX_SZ ),
(AP_JEP106_ARM_CHINA,
AP_CLASS_MEM_AP, 1, AP_TYPE_AHB5): ("AHB5-AP", AHB_AP, AP_ALL_TX_SZ ),
}
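# Example lookup (hedged, derived from the known-IDR notes above): an IDR of
# 0x04770041 (Cortex-M7 AHB-AP) yields the key (0x23B, 0x8, 4, 0x1) and so
# instantiates AHB_AP with the AP_ALL_TX_SZ flag.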
```

Row 3: intel/intel-extension-for-pytorch, /tests/cpu/test_runtime_api_jit.py

| field | value |
|---|---|
| blob_id | cc6cd335f4ee015f38da41253900d995ae381064 |
| directory_id | 05b0c763ab92086e69a8d00ae6465009c596f6bc |
| path | /tests/cpu/test_runtime_api_jit.py |
| content_id | edb76de82ded893386496b393d8dda6ce86ed913 |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | intel/intel-extension-for-pytorch |
| snapshot_id | 60ce2af2ec3a1dacae0d0db13dd51a5b44512e61 |
| revision_id | 7f9266789de7ca9d8bcf55606f3204f1a3640640 |
| branch_name | refs/heads/master |
| visit_date | 2023-09-01T09:13:16.866410 |
| revision_date | 2023-08-31T08:00:37 |
| committer_date | 2023-08-31T08:00:37 |
| github_id | 256,061,008 |
| star_events_count | 991 |
| fork_events_count | 144 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-08-13T13:56:07 |
| gha_created_at | 2020-04-15T23:35:29 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 34,536 |
| extension | py |
| filename | test_runtime_api_jit.py |

content:

```python
import unittest
import torch
import intel_extension_for_pytorch as ipex
from torch.testing._internal.jit_utils import JitTestCase
from test_ao_jit_llga_utils import JitLlgaTestCase
from test_runtime_api import TestInputOutputModule
from common_ipex_conf import runtime_thread_affinity_test_env
class SimpleNet(torch.nn.Module):
def __init__(self):
super(SimpleNet, self).__init__()
self.conv = torch.nn.Conv2d(
64, 128, (3, 3), stride=(2, 2), padding=(1, 1), bias=False
)
def forward(self, x):
x1 = self.conv(x)
y = torch.flatten(x1, start_dim=1)
return y
class SimpleNet_v2(torch.nn.Module):
def __init__(self):
super(SimpleNet_v2, self).__init__()
self.conv = torch.nn.Conv2d(
3, 64, (3, 3), stride=(2, 2), padding=(1, 1), bias=False
)
self.conv2 = torch.nn.Conv2d(
64, 64, (3, 3), stride=(2, 2), padding=(1, 1), bias=False
)
def forward(self, x):
x1 = self.conv(x)
x1 = self.conv2(x1)
y = torch.flatten(x1, start_dim=1)
return y
class TestJitRuntimeAPI(JitTestCase):
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_task_async_api_fp32_jit_model(self):
model = SimpleNet()
model.eval()
x = torch.rand(64, 64, 3, 3)
# Calculate the reference result
trace_model = torch.jit.trace(model, x)
y = trace_model(x)
# Create task
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
task = ipex.cpu.runtime.Task(trace_model, cpu_pool)
# Task submit and get
y_runtime_future = task(x)
y_runtime = y_runtime_future.get()
self.assertEqual(y, y_runtime)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_task_sync_api_fp32_jit_model(self):
model = SimpleNet()
model.eval()
x = torch.rand(64, 64, 3, 3)
# Calculate the reference result
trace_mode = torch.jit.trace(model, x)
y = trace_mode(x)
# Create task
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
task = ipex.cpu.runtime.Task(trace_mode, cpu_pool)
# Task sync run
y_runtime = task.run_sync(x)
self.assertEqual(y, y_runtime)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_task_async_api_bf16_jit_model(self):
model = SimpleNet()
model.eval()
x = torch.rand(64, 64, 3, 3)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
trace_mode = torch.jit.trace(model, x)
y = trace_mode(x)
# Create task
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
task = ipex.cpu.runtime.Task(trace_mode, cpu_pool)
# Task submit and wait
y_runtime_future = task(x)
y_runtime = y_runtime_future.get()
self.assertEqual(y, y_runtime)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_task_async_api_bf16_jit_model_multi_submission(self):
model = SimpleNet()
model.eval()
x = torch.rand(64, 64, 3, 3)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
trace_mode = torch.jit.trace(model, x)
y = trace_mode(x)
# Create task
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
task = ipex.cpu.runtime.Task(trace_mode, cpu_pool)
# Submit task 3 times, then wait for result
y_runtime = []
y_runtime_future = []
for i in range(3):
y_runtime_future.append(task(x))
for item in y_runtime_future:
y_runtime.append(item.get())
self.assertEqual(y, y_runtime[0])
self.assertEqual(y, y_runtime[1])
self.assertEqual(y, y_runtime[2])
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_task_copy_bf16_jit_mode(self):
model = SimpleNet()
model.eval()
x = torch.rand(64, 64, 3, 3)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
trace_mode = torch.jit.trace(model, x)
y = trace_mode(x)
# Create task
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
task = ipex.cpu.runtime.Task(trace_mode, cpu_pool)
# "Copy" the task (note: this is an alias; task2 refers to the same Task object)
task2 = task
# Task submit and wait
y_runtime_future = task(x)
y_runtime = y_runtime_future.get()
y_runtime_future2 = task2(x)
y_runtime2 = y_runtime_future2.get()
self.assertEqual(y, y_runtime)
self.assertEqual(y, y_runtime2)
class TestJITMultiStreamModule(JitTestCase):
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_multi_stream_module_bf16_jit_model(self):
model = SimpleNet()
model.eval()
cpu_pool = ipex.cpu.runtime.CPUPool()
batch_size = len(cpu_pool.core_ids)
x = torch.rand(batch_size, 64, 3, 3)
num_streams = batch_size
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
trace_model = torch.jit.trace(model, x)
y = trace_model(x)
# Create MultiStreamModule
cpu_pool = ipex.cpu.runtime.CPUPool()
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
trace_model, num_streams=num_streams, cpu_pool=cpu_pool
)
y_runtime = multi_stream_model(x)
self.assertEqual(y, y_runtime)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_multi_stream_module_bf16_jit_model_concat_output(self):
model = SimpleNet()
model.eval()
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
batch_size = len(cpu_pool.core_ids)
x = torch.rand(batch_size, 64, 3, 3)
num_streams = batch_size
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
trace_model = torch.jit.trace(model, x)
# Create MultiStreamModule
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
trace_model, num_streams=num_streams, cpu_pool=cpu_pool
)
y_runtime = multi_stream_model(x)
# Create MultiStreamModule with concat_output=False
multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
trace_model, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
)
y_runtime2 = multi_stream_model2(x)
self.assertEqual(len(y_runtime2), num_streams)
self.assertEqual(y_runtime, torch.cat(y_runtime2))
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_single_stream_module_bf16_jit_model(self):
model = SimpleNet()
model.eval()
batch_size = len(ipex.cpu.runtime.get_core_list_of_node_id(0))
x = torch.rand(batch_size, 64, 3, 3)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
trace_model = torch.jit.trace(model, x)
y = trace_model(x)
# Create MultiStreamModule
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
trace_model, num_streams=1, cpu_pool=cpu_pool
)
y_runtime = multi_stream_model(x)
# Create MultiStreamModule with concat_output=False
multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
trace_model, num_streams=1, cpu_pool=cpu_pool, concat_output=False
)
y_runtime2 = multi_stream_model2(x)
self.assertEqual(y, y_runtime)
self.assertEqual(y, y_runtime2[0])
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_core_number_not_divisible_stream_number_bf16_jit_model(self):
model = SimpleNet()
model.eval()
num_streams = 2
batch_size = num_streams
x = torch.rand(batch_size, 64, 3, 3)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
traced_model = torch.jit.trace(model, x)
traced_model = torch.jit.freeze(traced_model)
# Calculate the reference result
y = traced_model(x)
# Create MultiStreamModule
# Core Number is 3, stream Number is 2
cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
traced_model, num_streams=num_streams, cpu_pool=cpu_pool
)
multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
traced_model,
num_streams=num_streams,
cpu_pool=cpu_pool,
concat_output=False,
)
y_runtime = multi_stream_model(x)
y_runtime2 = multi_stream_model2(x)
self.assertEqual(y, y_runtime)
self.assertEqual(y, torch.cat(y_runtime2))
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_batchsize_less_than_stream_number_bf16_jit_model(self):
model = SimpleNet()
model.eval()
num_streams = 3
batch_size = 2
x = torch.rand(batch_size, 64, 3, 3)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
traced_model = torch.jit.trace(model, x)
traced_model = torch.jit.freeze(traced_model)
# Calculate the reference result
y = traced_model(x)
# Create MultiStreamModule
# Batchsize 2, Core Number is 3, stream Number is 3
cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
traced_model, num_streams=num_streams, cpu_pool=cpu_pool
)
multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
traced_model,
num_streams=num_streams,
cpu_pool=cpu_pool,
concat_output=False,
)
y_runtime = multi_stream_model(x)
y_runtime2 = multi_stream_model2(x)
self.assertEqual(y, y_runtime)
self.assertEqual(y, torch.cat(y_runtime2))
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_batchsize_not_divisible_stream_number_bf16_jit_model(self):
model = SimpleNet()
model.eval()
num_streams = 3
batch_size = 4
x = torch.rand(batch_size, 64, 3, 3)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
traced_model = torch.jit.trace(model, x)
traced_model = torch.jit.freeze(traced_model)
# Calculate the reference result
y = traced_model(x)
# Create MultiStreamModule
# Batchsize 4, Core Number is 3, stream Number is 3
cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
traced_model, num_streams=num_streams, cpu_pool=cpu_pool
)
multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
traced_model,
num_streams=num_streams,
cpu_pool=cpu_pool,
concat_output=False,
)
y_runtime = multi_stream_model(x)
y_runtime2 = multi_stream_model2(x)
self.assertEqual(y, y_runtime)
self.assertEqual(y, torch.cat(y_runtime2))
self.assertEqual(y_runtime2[0].size(0), 2)
self.assertEqual(y_runtime2[1].size(0), 1)
self.assertEqual(y_runtime2[2].size(0), 1)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_stream_number_auto_bf16_jit_model(self):
model = torch.nn.Softmax(dim=-1)
model.eval()
for i in range(len(ipex.cpu.runtime.get_core_list_of_node_id(0))):
batch_size = i + 1
x = torch.rand(batch_size, 64)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
traced_model = torch.jit.trace(model, x)
traced_model = torch.jit.freeze(traced_model)
# Warm Up
for _ in range(3):
traced_model(x)
# Calculate the reference result
y = traced_model(x)
cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=list(range(i + 1)))
# The stream number will be determined automatically.
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
traced_model, cpu_pool=cpu_pool
)
y_runtime = multi_stream_model(x)
stream_num_ground_truth = ipex.cpu.runtime.get_default_num_streams(cpu_pool)
self.assertEqual(y, y_runtime)
self.assertEqual(
multi_stream_model.get_stream_number(), stream_num_ground_truth
)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_stream_number_larger_than_core_number(self):
model = torch.nn.Softmax(dim=-1)
model.eval()
cpu_pool = ipex.cpu.runtime.CPUPool()
batch_size = len(cpu_pool.core_ids)
num_streams = batch_size + 1
x = torch.rand(batch_size, 64)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
traced_model = torch.jit.trace(model, x)
traced_model = torch.jit.freeze(traced_model)
# Warm Up
for _ in range(3):
traced_model(x)
# Calculate the reference result
y = traced_model(x)
# num_streams exceeds the core count here; the stream number will be clamped to the number of cores.
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
traced_model, num_streams=num_streams, cpu_pool=cpu_pool
)
y_runtime = multi_stream_model(x)
stream_num_ground_truth = ipex.cpu.runtime.get_default_num_streams(cpu_pool)
self.assertEqual(y, y_runtime)
self.assertEqual(
multi_stream_model.get_stream_number(), len(cpu_pool.core_ids)
)
class TestLLGARuntimeAPI(JitLlgaTestCase):
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_task_async_api_int8_jit_model(self):
with torch.no_grad():
model = SimpleNet_v2()
model.eval()
x = torch.rand(2, 3, 224, 224).contiguous(memory_format=torch.channels_last)
# Calculate the reference result
graph, m_llga, m_cpu = self.prepareModel(model, [x])
y = m_llga(x)
# Create task
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
task = ipex.cpu.runtime.Task(m_llga, cpu_pool)
# Task submit and wait
y_runtime_future = task(x)
y_runtime = y_runtime_future.get()
self.assertEqual(y, y_runtime)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_multi_stream_module_int8_jit_model(self):
with torch.no_grad():
model = SimpleNet_v2()
model.eval()
x = torch.rand(2, 3, 224, 224).contiguous(memory_format=torch.channels_last)
# Calculate the reference result
graph, m_llga, m_cpu = self.prepareModel(model, [x])
y = m_llga(x)
# Create task
cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
m_llga, num_streams=1, cpu_pool=cpu_pool
)
multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
m_llga, num_streams=1, cpu_pool=cpu_pool, concat_output=False
)
# Task submit and wait
y_runtime = multi_stream_model(x)
y_runtime2 = multi_stream_model2(x)
self.assertEqual(y, y_runtime)
self.assertEqual(y, torch.cat(y_runtime2))
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_core_number_not_divisible_stream_number_int8_jit_model(self):
with torch.no_grad():
model = SimpleNet_v2()
model.eval()
num_streams = 2
batch_size = num_streams
x = torch.rand(batch_size, 3, 16, 16).contiguous(
memory_format=torch.channels_last
)
# Calculate the reference result
graph, m_llga, m_cpu = self.prepareModel(model, [x])
y = m_llga(x)
# Create MultiStreamModule
# Core Number is 3, stream Number is 2
cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
m_llga, num_streams=num_streams, cpu_pool=cpu_pool
)
multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
m_llga, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
)
# Task submit and wait
y_runtime = multi_stream_model(x)
y_runtime2 = multi_stream_model2(x)
self.assertEqual(y, y_runtime)
self.assertEqual(y, torch.cat(y_runtime2))
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_batchsize_less_than_stream_number_int8_jit_model(self):
with torch.no_grad():
model = SimpleNet_v2()
model.eval()
num_streams = 3
batch_size = 2
x = torch.rand(batch_size, 3, 16, 16).contiguous(
memory_format=torch.channels_last
)
# Calculate the reference result
graph, m_llga, m_cpu = self.prepareModel(model, [x])
y = m_llga(x)
# Create MultiStreamModule
# Batchsize is 2, Core Number is 3, stream Number is 3
cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
m_llga, num_streams=num_streams, cpu_pool=cpu_pool
)
multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
m_llga, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
)
# Task submit and wait
y_runtime = multi_stream_model(x)
y_runtime2 = multi_stream_model2(x)
self.assertEqual(y, y_runtime)
self.assertEqual(y, torch.cat(y_runtime2))
self.assertEqual(len(y_runtime2), batch_size)
class TestMultiStreamModuleHint(JitTestCase):
def init_set_up(self):
# Common set-up: derive batch size and stream number from the CPU pool
cpu_pool = ipex.cpu.runtime.CPUPool()
batch_size = len(cpu_pool.core_ids)
num_streams = len(cpu_pool.core_ids)
return batch_size, num_streams, cpu_pool
def create_jit_traced_model(self, model, input):
traced_model = torch.jit.trace(model, input).eval()
traced_model = torch.jit.freeze(traced_model)
return traced_model
def create_multi_stream_module(
self,
traced_model,
num_streams,
cpu_pool,
multi_stream_input_hint,
multi_stream_output_hint=None,
concat_output=True,
):
if not concat_output:
return ipex.cpu.runtime.MultiStreamModule(
traced_model,
num_streams=num_streams,
cpu_pool=cpu_pool,
concat_output=False,
input_split_hint=multi_stream_input_hint,
)
else:
return ipex.cpu.runtime.MultiStreamModule(
traced_model,
num_streams=num_streams,
cpu_pool=cpu_pool,
input_split_hint=multi_stream_input_hint,
output_concat_hint=multi_stream_output_hint,
)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_input_output_hint(self):
batch_size, num_streams, cpu_pool = self.init_set_up()
# This module:
# * Accepts 3 tensors as input
# * Returns a tuple of 3 tensors as output
model = TestInputOutputModule().eval()
for batch_size in (num_streams - 1, num_streams):
# This also covers the case where batch_size is less than num_streams
input_tensor1 = torch.rand(batch_size, 1)
input_tensor2 = torch.rand(batch_size, 1)
input_tensor3 = torch.rand(batch_size, 1)
# jit.trace accepts only a single tensor or a tuple of tensors as input; see
# https://pytorch.org/docs/stable/generated/torch.jit.trace.html#torch-jit-trace
jit_input = (input_tensor1, input_tensor2, input_tensor3)
traced_model = self.create_jit_traced_model(model, jit_input)
# Warm Up in the main thread to finish the jit pass optimizations
for _ in range(3):
traced_model(input_tensor1, input_tensor2, input_tensor3)
# Calculate the reference result
y_ref = traced_model(input_tensor1, input_tensor2, input_tensor3)
multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(0, 0, 0)
multi_stream_model = self.create_multi_stream_module(
traced_model,
num_streams,
cpu_pool,
multi_stream_input_hint,
concat_output=False,
)
y_runtime = multi_stream_model(input_tensor1, input_tensor2, input_tensor3)
# Manually concat the output
y_runtime_res1 = []
y_runtime_res2 = []
y_runtime_res3 = []
for stream_id in range(
num_streams if ((batch_size // num_streams) >= 1) else batch_size
):
y_runtime_res1.append(y_runtime[stream_id][0])
y_runtime_res2.append(y_runtime[stream_id][1])
y_runtime_res3.append(y_runtime[stream_id][2])
y_runtime_res = (
torch.cat(y_runtime_res1),
torch.cat(y_runtime_res2),
torch.cat(y_runtime_res3),
)
self.assertEqual(y_ref, y_runtime_res)
# Create Multi Stream Module with concat output
multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, 0, 0))
multi_stream_model2 = self.create_multi_stream_module(
traced_model,
num_streams,
cpu_pool,
multi_stream_input_hint,
multi_stream_output_hint,
concat_output=True,
)
y_runtime_res2 = multi_stream_model2(
input_tensor1, input_tensor2, input_tensor3
)
self.assertEqual(y_ref, y_runtime_res2)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_simulate_bert_large_input_output(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, key1, key2, key3):
return key1 * 2, key2 * 2
# This module simulates the behaviour of Bert Large LZ models:
# * Accepts 3 tensors (passed as keyword arguments) as input
# * Returns a tuple of 2 tensors as output
model = TestModule().eval()
batch_size, num_streams, cpu_pool = self.init_set_up()
jit_input = (
torch.rand(batch_size, 1),
torch.rand(batch_size, 2),
torch.rand(batch_size, 3),
)
traced_model = self.create_jit_traced_model(model, jit_input)
input_tensor1 = torch.rand(batch_size, 1)
input_tensor2 = torch.rand(batch_size, 1)
input_tensor3 = torch.rand(batch_size, 1)
# Warm Up
for _ in range(3):
traced_model(key1=input_tensor1, key2=input_tensor2, key3=input_tensor3)
# Calculate the reference result
y_ref = traced_model(key1=input_tensor1, key2=input_tensor2, key3=input_tensor3)
multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(
key1=0, key2=0, key3=0
)
multi_stream_model = self.create_multi_stream_module(
traced_model,
num_streams,
cpu_pool,
multi_stream_input_hint,
concat_output=False,
)
y_runtime = multi_stream_model(
key1=input_tensor1, key2=input_tensor2, key3=input_tensor3
)
# Manually concat the output
y_runtime_res1 = []
y_runtime_res2 = []
for i in range(num_streams):
y_runtime_res1.append(y_runtime[i][0])
y_runtime_res2.append(y_runtime[i][1])
y_runtime_res = (torch.cat(y_runtime_res1), torch.cat(y_runtime_res2))
self.assertEqual(y_ref, y_runtime_res)
multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, 0))
multi_stream_model2 = self.create_multi_stream_module(
traced_model,
num_streams,
cpu_pool,
multi_stream_input_hint,
multi_stream_output_hint,
concat_output=True,
)
y_runtime_res2 = multi_stream_model2(
key1=input_tensor1, key2=input_tensor2, key3=input_tensor3
)
self.assertEqual(y_ref, y_runtime_res2)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_mix_position_keyword_input_output_hint(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, param1, param2, key1=None):
return param1, param2, key1
batch_size, num_streams, cpu_pool = self.init_set_up()
# This module simulates the behaviour of Bert Large LZ models:
# * Accepts 3 tensors (2 positional parameters and 1 keyword parameter) as input
# * Returns a tuple of 3 tensors as output
model = TestModule().eval()
jit_input = (
torch.rand(batch_size, 1),
torch.rand(batch_size, 2),
torch.rand(batch_size, 3),
)
traced_model = self.create_jit_traced_model(model, jit_input)
input_tensor1 = torch.rand(batch_size, 1)
input_tensor2 = torch.rand(batch_size, 2)
input_tensor3 = torch.rand(batch_size, 3)
input = (input_tensor1, input_tensor2)
k_input = {"key1": input_tensor3}
# Warm Up
for _ in range(3):
traced_model(input_tensor1, input_tensor2, key1=input_tensor3)
# Calculate the reference result
y_ref = traced_model(*input, **k_input)
y_ref2 = traced_model(input_tensor1, input_tensor2, input_tensor3)
y_ref3 = traced_model(input_tensor1, input_tensor2, key1=input_tensor3)
self.assertEqual(y_ref, y_ref2)
self.assertEqual(y_ref, y_ref3)
# Be careful: the jit-traced model may change the accepted input types
multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(0, 0, key1=0)
# Create Multi Stream Module with concat output
multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, 0, 0))
multi_stream_model = self.create_multi_stream_module(
traced_model,
num_streams,
cpu_pool,
multi_stream_input_hint,
multi_stream_output_hint,
concat_output=True,
)
# There are two equivalent calling conventions
y_runtime_res = multi_stream_model(
input_tensor1, input_tensor2, key1=input_tensor3
)
y_runtime_res2 = multi_stream_model(*input, **k_input)
self.assertEqual(y_ref, y_runtime_res)
self.assertEqual(y_ref, y_runtime_res2)
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_input_output_hint_not_along_dim_zero(self):
batch_size, num_streams, cpu_pool = self.init_set_up()
# This module:
# * Accepts 3 tensors as input
# * Returns a tuple of 3 tensors as output
model = TestInputOutputModule().eval()
input_tensor1 = torch.rand(1, batch_size)
input_tensor2 = torch.rand(batch_size, 2)
input_tensor3 = torch.rand(3, batch_size)
# jit.trace accepts only a single tensor or a tuple of tensors as input; see
# https://pytorch.org/docs/stable/generated/torch.jit.trace.html#torch-jit-trace
jit_input = (input_tensor1, input_tensor2, input_tensor3)
traced_model = self.create_jit_traced_model(model, jit_input)
# Warm Up in the main thread to finish the jit pass optimizations
for _ in range(3):
traced_model(input_tensor1, input_tensor2, input_tensor3)
# Calculate the reference result
y_ref = traced_model(input_tensor1, input_tensor2, input_tensor3)
multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(1, 0, 1)
multi_stream_model = self.create_multi_stream_module(
traced_model,
num_streams,
cpu_pool,
multi_stream_input_hint,
concat_output=False,
)
y_runtime = multi_stream_model(input_tensor1, input_tensor2, input_tensor3)
# Manually concat the output
y_runtime_res1 = []
y_runtime_res2 = []
y_runtime_res3 = []
for stream_id in range(
num_streams if ((batch_size // num_streams) >= 1) else batch_size
):
y_runtime_res1.append(y_runtime[stream_id][0])
y_runtime_res2.append(y_runtime[stream_id][1])
y_runtime_res3.append(y_runtime[stream_id][2])
y_runtime_res = (
torch.cat(y_runtime_res1, 1),
torch.cat(y_runtime_res2, 0),
torch.cat(y_runtime_res3, 1),
)
self.assertEqual(y_ref, y_runtime_res)
# Create Multi Stream Module with concat output
multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((1, 0, 1))
multi_stream_model2 = self.create_multi_stream_module(
traced_model,
num_streams,
cpu_pool,
multi_stream_input_hint,
multi_stream_output_hint,
concat_output=True,
)
y_runtime_res2 = multi_stream_model2(
input_tensor1, input_tensor2, input_tensor3
)
self.assertEqual(y_ref, y_runtime_res2)
class TestMultiStreamBenchmarkModule(JitTestCase):
@unittest.skipIf(
not ipex.cpu.runtime.is_runtime_ext_enabled(),
"Skip when IPEX Runtime extension is not enabled",
)
@runtime_thread_affinity_test_env
def test_multi_stream_benchmark_module_bf16_jit_model(self):
model = SimpleNet().eval()
batch_size = 1
x = torch.rand(batch_size, 64, 3, 3)
# Calculate the reference result
with torch.cpu.amp.autocast(
enabled=True, dtype=torch.bfloat16
), torch.no_grad():
trace_model = torch.jit.trace(model, x)
# Warm Up
for _ in range(3):
trace_model(x)
# Create MultiStreamModule
multi_stream_model = ipex.cpu.runtime._MultiStreamBenchmarkModule(trace_model)
multi_stream_model(x)
if __name__ == "__main__":
test = unittest.main()
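# Standalone sketch of the asynchronous Task pattern exercised above (assumes
# the IPEX runtime extension is enabled; SimpleNet is the toy model defined in
# this file):
def _task_pattern_sketch():
    x = torch.rand(64, 64, 3, 3)
    traced = torch.jit.trace(SimpleNet().eval(), x)
    cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
    task = ipex.cpu.runtime.Task(traced, cpu_pool)
    future = task(x)     # submit; returns immediately
    return future.get()  # blocks until the result is ready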
blob_id: 183621c1fe2a60458bf8091aad4db6a4ebce5748 | directory_id: 48ca6f9f041a1e9f563500c8a7fa04dbb18fa949
path: /docs/manual/gears/examples/mul.py | content_id: 3314b206de649e3fbf4ef21a64b2566e84e3132a | filename: mul.py
detected_licenses: ["MIT"] | license_type: permissive | repo_name: bogdanvuk/pygears
snapshot_id: 71404e53d4689ec9cdd9db546bfc0f229a7e02da | revision_id: 705b11ab6de79868b25753fa9d0ce7128791b346 | branch_name: refs/heads/master
visit_date: 2023-07-08T11:38:54.625172 | revision_date: 2022-03-07T12:29:00 | committer_date: 2022-03-07T12:29:00
github_id: 124890922 | star_events_count: 146 | fork_events_count: 16
gha_license_id: MIT | gha_event_created_at: 2022-08-15T07:57:08 | gha_created_at: 2018-03-12T13:10:06 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 168 | extension: py
from pygears.lib import drv, check
from pygears.typing import Uint
a = drv(t=Uint[4], seq=[0, 1, 2])
b = drv(t=Uint[4], seq=[0, 1, 2])
(a * b) | check(ref=[0, 1, 4])
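# The same drive-and-check pattern generalizes to other gears; a sketch using
# addition (assuming '+' is overloaded on interfaces the same way '*' is):
a2 = drv(t=Uint[4], seq=[0, 1, 2])
b2 = drv(t=Uint[4], seq=[0, 1, 2])
(a2 + b2) | check(ref=[0, 2, 4])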
blob_id: 5c0c3fee62039aac2fd9d316a1772944cf4e74ad | directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a
path: /python/helpers/typeshed/stubs/SQLAlchemy/sqlalchemy/dialects/sqlite/base.pyi | content_id: 31efadb6c92076cc3f8d26e78242d1e4afe35e20 | filename: base.pyi
detected_licenses: ["Apache-2.0", "MIT"] | license_type: permissive | repo_name: JetBrains/intellij-community
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941 | revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142 | branch_name: refs/heads/master
visit_date: 2023-09-03T17:06:37.560889 | revision_date: 2023-09-03T11:51:00 | committer_date: 2023-09-03T12:12:27
github_id: 2489216 | star_events_count: 16288 | fork_events_count: 6635
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-12T07:41:58 | gha_created_at: 2011-09-30T13:33:05 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5816 | extension: pyi
from typing import Any
import sqlalchemy.types as sqltypes
from ...engine import default
from ...sql import compiler
from ...types import (
BLOB as BLOB,
BOOLEAN as BOOLEAN,
CHAR as CHAR,
DECIMAL as DECIMAL,
FLOAT as FLOAT,
INTEGER as INTEGER,
NUMERIC as NUMERIC,
REAL as REAL,
SMALLINT as SMALLINT,
TEXT as TEXT,
TIMESTAMP as TIMESTAMP,
VARCHAR as VARCHAR,
)
from .json import JSON as JSON
class _SQliteJson(JSON):
def result_processor(self, dialect, coltype): ...
class _DateTimeMixin:
def __init__(self, storage_format: Any | None = ..., regexp: Any | None = ..., **kw) -> None: ...
@property
def format_is_text_affinity(self): ...
def adapt(self, cls, **kw): ...
def literal_processor(self, dialect): ...
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
def __init__(self, *args, **kwargs) -> None: ...
def bind_processor(self, dialect): ...
def result_processor(self, dialect, coltype): ...
class DATE(_DateTimeMixin, sqltypes.Date):
def bind_processor(self, dialect): ...
def result_processor(self, dialect, coltype): ...
class TIME(_DateTimeMixin, sqltypes.Time):
def __init__(self, *args, **kwargs) -> None: ...
def bind_processor(self, dialect): ...
def result_processor(self, dialect, coltype): ...
colspecs: Any
ischema_names: Any
class SQLiteCompiler(compiler.SQLCompiler):
extract_map: Any
def visit_now_func(self, fn, **kw): ...
def visit_localtimestamp_func(self, func, **kw): ...
def visit_true(self, expr, **kw): ...
def visit_false(self, expr, **kw): ...
def visit_char_length_func(self, fn, **kw): ...
def visit_cast(self, cast, **kwargs): ...
def visit_extract(self, extract, **kw): ...
def limit_clause(self, select, **kw): ...
def for_update_clause(self, select, **kw): ...
def visit_is_distinct_from_binary(self, binary, operator, **kw): ...
def visit_is_not_distinct_from_binary(self, binary, operator, **kw): ...
def visit_json_getitem_op_binary(self, binary, operator, **kw): ...
def visit_json_path_getitem_op_binary(self, binary, operator, **kw): ...
def visit_empty_set_op_expr(self, type_, expand_op): ...
def visit_empty_set_expr(self, element_types): ...
def visit_regexp_match_op_binary(self, binary, operator, **kw): ...
def visit_not_regexp_match_op_binary(self, binary, operator, **kw): ...
def visit_on_conflict_do_nothing(self, on_conflict, **kw): ...
def visit_on_conflict_do_update(self, on_conflict, **kw): ...
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs): ...
def visit_primary_key_constraint(self, constraint): ...
def visit_unique_constraint(self, constraint): ...
def visit_check_constraint(self, constraint): ...
def visit_column_check_constraint(self, constraint): ...
def visit_foreign_key_constraint(self, constraint): ...
def define_constraint_remote_table(self, constraint, table, preparer): ...
def visit_create_index(self, create, include_schema: bool = ..., include_table_schema: bool = ...): ... # type: ignore[override]
def post_create_table(self, table): ...
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw): ...
def visit_DATETIME(self, type_, **kw): ...
def visit_DATE(self, type_, **kw): ...
def visit_TIME(self, type_, **kw): ...
def visit_JSON(self, type_, **kw): ...
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words: Any
class SQLiteExecutionContext(default.DefaultExecutionContext): ...
class SQLiteDialect(default.DefaultDialect):
name: str
supports_alter: bool
supports_unicode_statements: bool
supports_unicode_binds: bool
supports_default_values: bool
supports_default_metavalue: bool
supports_empty_insert: bool
supports_cast: bool
supports_multivalues_insert: bool
tuple_in_values: bool
supports_statement_cache: bool
default_paramstyle: str
statement_compiler: Any
ddl_compiler: Any
type_compiler: Any
preparer: Any
ischema_names: Any
colspecs: Any
isolation_level: Any
construct_arguments: Any
native_datetime: Any
def __init__(
self,
isolation_level: Any | None = ...,
native_datetime: bool = ...,
json_serializer: Any | None = ...,
json_deserializer: Any | None = ...,
_json_serializer: Any | None = ...,
_json_deserializer: Any | None = ...,
**kwargs,
) -> None: ...
def set_isolation_level(self, connection, level) -> None: ...
def get_isolation_level(self, connection): ...
def on_connect(self): ...
def get_schema_names(self, connection, **kw): ...
def get_table_names(self, connection, schema: Any | None = ..., **kw): ...
def get_temp_table_names(self, connection, **kw): ...
def get_temp_view_names(self, connection, **kw): ...
def has_table(self, connection, table_name, schema: Any | None = ...): ... # type: ignore[override]
def get_view_names(self, connection, schema: Any | None = ..., **kw): ...
def get_view_definition(self, connection, view_name, schema: Any | None = ..., **kw): ...
def get_columns(self, connection, table_name, schema: Any | None = ..., **kw): ...
def get_pk_constraint(self, connection, table_name, schema: Any | None = ..., **kw): ...
def get_foreign_keys(self, connection, table_name, schema: Any | None = ..., **kw): ...
def get_unique_constraints(self, connection, table_name, schema: Any | None = ..., **kw): ...
def get_check_constraints(self, connection, table_name, schema: Any | None = ..., **kw): ...
def get_indexes(self, connection, table_name, schema: Any | None = ..., **kw): ...
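# Usage sketch (separate from the stub above) for the sqlite DATETIME type it
# declares; storage_format is part of SQLAlchemy's documented sqlite dialect API:
from sqlalchemy import Column, Integer, MetaData, Table, create_engine
from sqlalchemy.dialects.sqlite import DATETIME

second_precision_dt = DATETIME(
    storage_format="%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d"
)
metadata = MetaData()
events = Table(
    "events", metadata,
    Column("id", Integer, primary_key=True),
    Column("ts", second_precision_dt),  # stored without sub-second digits
)
engine = create_engine("sqlite://")  # in-memory database
metadata.create_all(engine)          # emits CREATE TABLE with the custom DATETIME column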
blob_id: 5d8fd1868953cd6ece6bd7ebb353a603fdb9cfe0 | directory_id: 0cd893fddf3a43459030292dad953c3810713513
path: /clif/testing/python/extend_properties_test.py | content_id: 926f856bbd6473ef5a384d0df096f95e463348b0 | filename: extend_properties_test.py
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: google/clif
snapshot_id: 8fc6d75f7e4a1a443f9bd596d05ea3e4c820e1c4 | revision_id: 7501b3ca70a92a7a15022b3035bc4b1706f7569a | branch_name: refs/heads/main
visit_date: 2023-08-19T06:26:17.321706 | revision_date: 2023-08-18T23:18:45 | committer_date: 2023-08-18T23:20:30
github_id: 88560371 | star_events_count: 1026 | fork_events_count: 146
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-08-31T23:41:41 | gha_created_at: 2017-04-17T23:36:06 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2437 | extension: py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from clif.testing.python import extend_properties
class ExtendPropertiesTest(absltest.TestCase):
def test_property_with_simple_getter(self):
expected_value = 543
ph = extend_properties.PropertyHolder(expected_value)
self.assertEqual(ph.value, expected_value)
def test_property_with_customized_getter(self):
expected_value = 5432
ph = extend_properties.PropertyHolder(expected_value)
self.assertEqual(ph.value_times_ten, expected_value * 10)
def test_property_with_getter_and_setter(self):
expected_value = 54321
ph = extend_properties.PropertyHolder(expected_value)
self.assertEqual(ph.value_gs, expected_value)
new_value = 12345
ph.value_gs = new_value
self.assertEqual(ph.value_gs, new_value)
def test_property_with_pointer_self(self):
expected_value = 54321
ph = extend_properties.PropertyHolder(expected_value)
self.assertEqual(ph.value_ptr_self, expected_value)
def test_uncopyable_property_holder(self):
expected_value = 54321
ph = extend_properties.UncopyableHolder(expected_value)
self.assertEqual(ph.value, expected_value)
new_value = 12345
ph.value = new_value
self.assertEqual(ph.value, new_value)
def test_bytes_property(self):
expected_value = b'54321'
ph = extend_properties.PropertyHolder(12345)
ph.value_bytes = expected_value
self.assertEqual(ph.value_bytes, expected_value)
def test_nested_property_getter(self):
ph = extend_properties.NestedPropertyHolder.Inner(83)
self.assertEqual(ph.value, 83 + 93 + 72)
def test_nested_property_getter_setter(self):
ph = extend_properties.NestedPropertyHolder.Inner(29)
self.assertEqual(ph.value_gs, 29 + 93 + 24)
ph.value_gs = 8
self.assertEqual(ph.value_gs, 8 + 57 + 24)
if __name__ == '__main__':
absltest.main()
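# For reference, a pure-Python analogue of the getter/setter property the
# CLIF-extended class exposes (PyPropertyHolder is a hypothetical stand-in,
# not the extension type under test):
class PyPropertyHolder:
    def __init__(self, value):
        self._value = value

    @property
    def value_gs(self):
        return self._value

    @value_gs.setter
    def value_gs(self, new_value):
        self._value = new_value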
blob_id: e1ccb8be90cc6c706336a56d4b72aa326d2a4405 | directory_id: 568fa58296378fa129ab3349adf010daa44ed45b
path: /tests/common/test_run/cpu/pooling_run.py | content_id: 5172da149b97c8999e38b241e9317a0ad63331a3 | filename: pooling_run.py
detected_licenses: ["Apache-2.0", "BSD-3-Clause", "NCSA", "X11-distribute-modifications-variant", "Zlib", "MIT", "LicenseRef-scancode-unknown-license-reference", "Unlicense", "LLVM-exception", "BSD-2-Clause"] | license_type: permissive | repo_name: mindspore-ai/akg
snapshot_id: 37f471badc66de6a831f1f45ad84344f34d23ef2 | revision_id: 99f33858d6972741748cbfc9ab0bf9600428fef7 | branch_name: refs/heads/master
visit_date: 2023-07-25T23:03:17.672665 | revision_date: 2023-07-11T07:33:57 | committer_date: 2023-07-11T07:33:57
github_id: 274077856 | star_events_count: 319 | fork_events_count: 36
gha_license_id: Apache-2.0 | gha_event_created_at: 2021-12-30T13:43:08 | gha_created_at: 2020-06-22T08:09:05 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6688 | extension: py
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from akg.ops.nn.cpu import pooling
import akg
import tvm
import topi
import numpy as np
from akg.topi.util import get_const_tuple
from akg.utils import kernel_exec as utils
from akg.utils.result_analysis import target_profiling
from akg.utils.format_transform import to_tvm_nd_array
from tests.common.gen_random import random_gaussian
support_list = {"float32": np.float32}
support_layout_format = {"NCHW", "NCHWc"}
def gen_data(shape_data, kernel, stride, padding, pool_type, dtype, ceil_mode, count_include_pad, data_layout):
kw, kh = kernel
sw, sh = stride
pt, pl, pb, pr = padding
if data_layout == "NCHW":
n, ic, ih, iw = shape_data
input0 = tvm.placeholder((n, ic, ih, iw), name='input0')
output0 = topi.nn.pool(input0, kernel=[kh, kw], stride=[sh, sw], padding=padding,
pool_type=pool_type, ceil_mode=ceil_mode,
layout="NCHW", count_include_pad=count_include_pad)
a_np = random_gaussian((n, ic, ih, iw), miu=1, sigma=0.1).astype(support_list[dtype])
pad_np = np.zeros(shape=(n, ic, ih+pt+pb, iw+pl+pr)).astype(dtype)
no_zero = (range(n), range(ic), (range(pt, ih+pt)), (range(pl, iw+pl)))
pad_np[np.ix_(*no_zero)] = a_np
_, oc, oh, ow = get_const_tuple(output0.shape)
b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
if pool_type == 'avg':
for i in range(oh):
for j in range(ow):
if count_include_pad:
b_np[:, :, i, j] = np.mean(
pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2, 3))
else:
pad_count = np.sum(
pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2, 3))
b_np[:, :, i, j] = np.sum(
pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2, 3)) / np.maximum(pad_count, 1)
elif pool_type == 'max':
for i in range(oh):
for j in range(ow):
b_np[:, :, i, j] = np.max(
pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2, 3))
output_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
return a_np, output_np, b_np
elif data_layout == "NHWC":
raise ValueError("Only layouts NCHW/NCHWc are supported in the Python DSL")
else:
# NCHWc
n, ic_out, ih, iw, ic_in = shape_data
input0 = tvm.placeholder((n, ic_out, ih, iw, ic_in), name='input0')
output0 = topi.nn.pool(input0, kernel=[kh, kw], stride=[sh, sw], padding=padding,
pool_type=pool_type, ceil_mode=ceil_mode,
layout="NCHWc", count_include_pad=count_include_pad)
a_np = random_gaussian((n, ic_out, ih, iw, ic_in), miu=1, sigma=0.1).astype(support_list[dtype])
pad_np = np.zeros(shape=(n, ic_out, ih+pt+pb,
iw+pl+pr, ic_in)).astype(dtype)
no_zero = (range(n), range(ic_out), (range(pt, ih+pt)),
(range(pl, iw+pl)), range(ic_in))
pad_np[np.ix_(*no_zero)] = a_np
_, oc_out, oh, ow, oc_in = get_const_tuple(output0.shape)
b_np = np.zeros(shape=(n, oc_out, oh, ow, oc_in)).astype(dtype)
if pool_type == 'avg':
for i in range(oh):
for j in range(ow):
if count_include_pad:
b_np[:, :, i, j, :] = np.mean(
pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw, :], axis=(2, 3))
else:
pad_count = np.sum(
pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw, :] > 0, axis=(2, 3))
b_np[:, :, i, j, :] = np.sum(
pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw, :], axis=(2, 3)) / np.maximum(pad_count, 1)
elif pool_type == 'max':
for i in range(oh):
for j in range(ow):
b_np[:, :, i, j, :] = np.max(
pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw, :], axis=(2, 3))
output_np = np.zeros(shape=(n, oc_out, oh, ow, oc_in)).astype(dtype)
return a_np, output_np, b_np
def pooling_run(shape_data, kernel, stride, padding, pool_type, dtype,
ceil_mode, count_include_pad=True,
data_layout="NCHWc", poly_sch=True, attrs=None):
default_attrs = {"enable_auto_fuse": False, "polytops_parameter_shifting": True, "polytops_enable_skewing": False}
attrs = {} if attrs is None else attrs
attrs.update(default_attrs)
attrs["target"] = attrs.get("target", "llvm")
op_attrs = [kernel, stride, padding, pool_type,
ceil_mode, count_include_pad, data_layout]
mod = utils.op_build_test(pooling, (shape_data,), (dtype,),
op_attrs=op_attrs, attrs=attrs,
kernel_name="pooling_" + pool_type + "_auto", polyhedral=poly_sch)
data, output, expect = gen_data(
shape_data, kernel, stride, padding, pool_type, dtype, ceil_mode, count_include_pad, data_layout)
args = (data, output)
output = utils.mod_launch(mod, args, expect=expect)
rtol = 1e-3 if dtype == "float16" else 1e-4
atol = 1e-3 if dtype == "float16" else 1e-4
res = np.allclose(output, expect, rtol=rtol, atol=atol)
print("Test {}".format("Pass" if res else "Fail"))
target_name = attrs["target"].split()[0]
if not res:
mod_source = mod
if target_name != "llvm":
mod_source = mod.imported_modules[0]
print("Error {}:========================".format(target_name))
print(mod_source.get_source())
raise AssertionError("Test fail")
if attrs.get("profiling", False):
data, output = to_tvm_nd_array(
[data, output], akg.tvm.context(target_name, 0))
target_profiling(mod, data, output,
target=target_name, repeat_time=attrs.get("repeat_times", 1000))
return (data, ), output, expect, res
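# Usage sketch for the runner above (shapes and attributes are illustrative):
# 2x2 max pooling with stride 2, no padding, plain NCHW layout.
if __name__ == "__main__":
    pooling_run((1, 16, 32, 32), (2, 2), (2, 2), (0, 0, 0, 0),
                "max", "float32", ceil_mode=False,
                count_include_pad=True, data_layout="NCHW")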
blob_id: c2f443180c100ff196744f1fa6b0f133758b1731 | directory_id: f5870458d4e1b747bc0dbd30ab5d68c987e19d13
path: /pdoc/doc_pyi.py | content_id: 2c8b10af95ad8050ad42388d4c6770770386dd1e | filename: doc_pyi.py
detected_licenses: ["Unlicense"] | license_type: permissive | repo_name: mitmproxy/pdoc
snapshot_id: ab2527d85fc1cb507b463bb21457e998459cfb12 | revision_id: c10bc068ab07d250cef2ab91943887f6de537efb | branch_name: refs/heads/main
visit_date: 2023-09-04T00:49:31.583747 | revision_date: 2023-09-01T08:53:39 | committer_date: 2023-09-01T08:53:39
github_id: 11885132 | star_events_count: 1274 | fork_events_count: 150
gha_license_id: Unlicense | gha_event_created_at: 2023-09-10T09:50:34 | gha_created_at: 2013-08-04T21:12:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4060 | extension: py
"""
This module is responsible for patching `pdoc.doc.Doc` objects with type annotations found
in `.pyi` type stub files ([PEP 561](https://peps.python.org/pep-0561/)).
This makes it possible to add type hints for native modules such as modules written using [PyO3](https://pyo3.rs/).
"""
from __future__ import annotations
from pathlib import Path
import sys
import traceback
import types
from unittest import mock
import warnings
from pdoc import doc
from ._compat import cache
@cache
def find_stub_file(module_name: str) -> Path | None:
"""Try to find a .pyi file with type stubs for the given module name."""
module_path = module_name.replace(".", "/")
for dir in sys.path:
file_candidates = [
Path(dir) / (module_path + ".pyi"),
Path(dir) / (module_path + "/__init__.pyi"),
]
for f in file_candidates:
if f.exists():
return f
return None
def _import_stub_file(module_name: str, stub_file: Path) -> types.ModuleType:
"""Import the type stub outside of the normal import machinery."""
code = compile(stub_file.read_text(), str(stub_file), "exec")
m = types.ModuleType(module_name)
m.__file__ = str(stub_file)
eval(code, m.__dict__, m.__dict__)
return m
def _prepare_module(ns: doc.Namespace) -> None:
"""
Touch all lazy properties that are accessed in `_patch_doc` to make sure that they are precomputed.
We want to do this in advance while sys.modules is not monkeypatched yet.
"""
# at the moment, .members is the only lazy property that is accessed.
for member in ns.members.values():
if isinstance(member, doc.Class):
_prepare_module(member)
def _patch_doc(target_doc: doc.Doc, stub_mod: doc.Module) -> None:
"""
Patch the target doc (a "real" Python module, e.g. a ".py" file)
with the type information from stub_mod (a ".pyi" file).
"""
if target_doc.qualname:
stub_doc = stub_mod.get(target_doc.qualname)
if stub_doc is None:
return
else:
stub_doc = stub_mod
if isinstance(target_doc, doc.Function) and isinstance(stub_doc, doc.Function):
target_doc.signature = stub_doc.signature
target_doc.funcdef = stub_doc.funcdef
elif isinstance(target_doc, doc.Variable) and isinstance(stub_doc, doc.Variable):
target_doc.annotation = stub_doc.annotation
elif isinstance(target_doc, doc.Namespace) and isinstance(stub_doc, doc.Namespace):
# pdoc currently does not include variables without docstring in .members (not ideal),
# so the regular patching won't work. We manually copy over type annotations instead.
for k, v in stub_doc._var_annotations.items():
var = target_doc.members.get(k, None)
if isinstance(var, doc.Variable):
var.annotation = v
for m in target_doc.members.values():
_patch_doc(m, stub_mod)
else:
warnings.warn(
f"Error processing type stub for {target_doc.fullname}: "
f"Stub is a {stub_doc.kind}, but target is a {target_doc.kind}."
)
def include_typeinfo_from_stub_files(module: doc.Module) -> None:
"""Patch the provided module with type information from a matching .pyi file."""
# Check if module is a stub module itself - we don't want to recurse!
module_file = str(
doc._safe_getattr(sys.modules.get(module.modulename), "__file__", "")
)
if module_file.endswith(".pyi"):
return
stub_file = find_stub_file(module.modulename)
if not stub_file:
return
try:
imported_stub = _import_stub_file(module.modulename, stub_file)
except Exception:
warnings.warn(
f"Error parsing type stubs for {module.modulename}:\n{traceback.format_exc()}"
)
return
_prepare_module(module)
stub_mod = doc.Module(imported_stub)
with mock.patch.dict("sys.modules", {module.modulename: imported_stub}):
_patch_doc(module, stub_mod)
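# Quick illustration of the search order find_stub_file implements (the module
# name is hypothetical; this just prints the candidate stub paths it probes):
def _print_stub_candidates(module_name: str) -> None:
    module_path = module_name.replace(".", "/")
    for dir in sys.path:
        print(Path(dir) / (module_path + ".pyi"))           # flat module stub
        print(Path(dir) / (module_path + "/__init__.pyi"))  # package stub

# _print_stub_candidates("some_pkg.native_mod")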
blob_id: acfa3abf7af351f4e5370ff1095e8e21b479ea00 | directory_id: 349471295cbaea395f0f8e493dc010cf170f6ed0
path: /bayesian_bootstrap/tests/test_bootstrap.py | content_id: 2a2a5fd4063d9df9a7823feb67bb782942a9c9d9 | filename: test_bootstrap.py
detected_licenses: ["MIT"] | license_type: permissive | repo_name: lmc2179/bayesian_bootstrap
snapshot_id: f167a6f2c89a36b67ceec51ba1718e6b6010d6d6 | revision_id: 93b8cf41b0675ec24a18e554f5011cdd07de7d91 | branch_name: refs/heads/master
visit_date: 2022-06-07T06:29:18.788807 | revision_date: 2022-03-12T15:57:51 | committer_date: 2022-03-12T15:57:51
github_id: 89321721 | star_events_count: 127 | fork_events_count: 22
gha_license_id: MIT | gha_event_created_at: 2022-03-12T15:49:03 | gha_created_at: 2017-04-25T05:42:32 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8262 | extension: py
import unittest
import numpy as np
import scipy
import bayesian_bootstrap as bb
from bayesian_bootstrap import (
mean,
var,
bayesian_bootstrap,
central_credible_interval,
highest_density_interval,
BayesianBootstrapBagging,
covar,
)
from sklearn.linear_model import LinearRegression
RNG = np.random.default_rng(1337) # repeatable pseudorandomness
class TestMoments(unittest.TestCase):
def test_mean(self):
X = [-1, 0, 1]
posterior_samples = mean(X, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.015)
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
def test_variance(self):
X = RNG.uniform(-1, 1, 500)
posterior_samples = var(X, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
def test_self_covar(self):
X = RNG.uniform(-1, 1, 500)
posterior_samples = covar(X, X, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), np.var(X), delta=0.05)
def test_covar(self):
X = RNG.uniform(-1, 1, 500)
Y = RNG.uniform(-1, 1, 500)
posterior_samples = covar(X, Y, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.05)
def test_mean_resample(self):
X = [-1, 0, 1]
posterior_samples = bayesian_bootstrap(X, np.mean, 10000, 100, low_mem=True)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
posterior_samples = bayesian_bootstrap(X, np.mean, 10000, 100, low_mem=False)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
def test_var_resample(self):
X = RNG.uniform(-1, 1, 500)
posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=True)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
X = RNG.uniform(-1, 1, 500)
posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=False)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
class TestIntervals(unittest.TestCase):
def test_central_credible_interval(self):
l, r = central_credible_interval(self._shuffle(range(10)), alpha=0.2)
self.assertEqual(l, 0.9)
self.assertEqual(r, 8.1)
l, r = central_credible_interval(self._shuffle(range(10)), alpha=0.19)
self.assertEqual(l, 0.855)
self.assertEqual(r, 8.145)
l, r = central_credible_interval(self._shuffle(range(20)), alpha=0.1)
self.assertAlmostEqual(l, 0.95)
self.assertEqual(r, 18.05)
def test_hpdi(self):
l, r = highest_density_interval(self._shuffle([0, 10, 1] + [1.1] * 7), alpha=0.2)
self.assertEqual(l, 1)
self.assertEqual(r, 1.1)
l, r = highest_density_interval(self._shuffle([0, 10, 1.1, 1]), alpha=0.5)
self.assertEqual(l, 1)
self.assertEqual(r, 1.1)
def _shuffle(self, x):
x = list(x)
RNG.shuffle(x)
return x
class TestRegression(unittest.TestCase):
def test_parameter_estimation_resampling_low_memory(self):
X = RNG.uniform(0, 4, 1000)
y = X + RNG.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=True)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
intercept_samples = [b.intercept_ for b in m.base_models_]
self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
l, r = central_credible_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
l, r = highest_density_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = central_credible_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
def test_parameter_estimation_resampling(self):
X = RNG.uniform(0, 4, 1000)
y = X + RNG.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=False)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
intercept_samples = [b.intercept_ for b in m.base_models_]
self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
l, r = central_credible_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
l, r = highest_density_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = central_credible_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
def test_parameter_estimation_bayes(self):
X = RNG.uniform(0, 4, 1000)
y = X + RNG.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=False)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
intercept_samples = [b.intercept_ for b in m.base_models_]
self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
l, r = central_credible_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
l, r = highest_density_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = central_credible_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
def test_parameter_estimation_bayes_low_memory(self):
X = RNG.uniform(0, 4, 1000)
y = X + RNG.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=True)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
intercept_samples = [b.intercept_ for b in m.base_models_]
self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
l, r = central_credible_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
l, r = highest_density_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = central_credible_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
def test_pearsonr():
x = np.linspace(0, 5, 10)
y = np.linspace(0, 5, 10)
assert np.mean(bb.pearsonr(x, y, 10000)) == 1
assert np.mean(bb.pearsonr(x, -y, 10000)) == -1
x = [0, 1, 3, 6]
y = [1, 2, 5, 7]
assert np.isclose(np.mean(bb.pearsonr(x, y, 10000)), scipy.stats.pearsonr(x, y)[0], atol=0.001)
x = np.linspace(-10, 10, 10000)
y = np.abs(x)
assert np.isclose(scipy.stats.pearsonr(x, y)[0], np.mean(bb.pearsonr(x, y, 1000)), atol=0.001)
if __name__ == "__main__":
unittest.main()
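# Minimal usage sketch of the API exercised by the tests above:
def _posterior_mean_sketch():
    x = RNG.normal(0, 1, 100)
    posterior = mean(x, 10000)  # posterior samples of the population mean
    low, high = central_credible_interval(posterior, alpha=0.05)
    return low, high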
blob_id: b16d1369b50ab90ffaeb03d5eae6b85bc0f946a5 | directory_id: d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
path: /colossalai/shardformer/modeling/blip2.py | content_id: 69730fd3d254e266441d4f18dd3068ce886726f5 | filename: blip2.py
detected_licenses: ["BSD-3-Clause", "LicenseRef-scancode-warranty-disclaimer", "Apache-2.0", "BSD-2-Clause", "MIT"] | license_type: permissive | repo_name: hpcaitech/ColossalAI
snapshot_id: a082ed08a3807b53c49d1f86835b9808590d9042 | revision_id: c7b60f75470f067d1342705708810a660eabd684 | branch_name: refs/heads/main
visit_date: 2023-09-01T04:13:13.834565 | revision_date: 2023-08-30T15:07:21 | committer_date: 2023-08-30T15:07:21
github_id: 422274596 | star_events_count: 32044 | fork_events_count: 4084
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T15:19:54 | gha_created_at: 2021-10-28T16:19:44 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4352 | extension: py
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
def forward_fn():
def forward(
self,
hidden_states: torch.Tensor,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
mixed_qkv = self.qkv(hidden_states)
# modified from original code, which is:
# mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
# 2, 0, 3, 1, 4
# )
# to:
mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
query_states, key_states, value_states = (
mixed_qkv[0],
mixed_qkv[1],
mixed_qkv[2],
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
attention_scores = attention_scores * self.scale
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.projection(context_layer)
outputs = (output, attention_probs) if output_attentions else (output, None)
return outputs
return forward
def get_blip2_flash_attention_forward():
from transformers.models.blip_2.modeling_blip_2 import Blip2Attention
from colossalai.kernel.cuda_native import ColoAttention
def forward(
self: Blip2Attention,
hidden_states: torch.Tensor,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
mixed_qkv = self.qkv(hidden_states)
mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, -1).permute(2, 0, 1, 3, 4)
query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
attention = ColoAttention(embed_dim=self.embed_dim,
num_heads=self.num_heads,
dropout=self.dropout.p,
scale=self.scale)
context_layer = attention(query_states, key_states, value_states)
output = self.projection(context_layer)
outputs = (output, None)
return outputs
return forward
def get_jit_fused_blip2_QFormer_self_output_forward():
from transformers.models.blip_2.modeling_blip_2 import Blip2QFormerSelfOutput
def forward(self: Blip2QFormerSelfOutput, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout_add(hidden_states, input_tensor, self.dropout.p, self.dropout.training)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
return forward
def get_jit_fused_blip2_QFormer_output_forward():
from transformers.models.blip_2.modeling_blip_2 import Blip2QFormerOutput
def forward(self: Blip2QFormerOutput, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout_add(hidden_states, input_tensor, self.dropout.p, self.dropout.training)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
return forward
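# A usage sketch (an assumption about the calling convention, not part of this
# module): each factory above returns an unbound function, so a caller such as
# a shardformer policy typically assigns it onto the HuggingFace class, e.g.:
#
# from transformers.models.blip_2.modeling_blip_2 import Blip2QFormerOutput
# Blip2QFormerOutput.forward = get_jit_fused_blip2_QFormer_output_forward()
#
# after which every instance dispatches to the replaced forward. Note the
# jit-fused forwards call `self.dropout_add`, which the stock HuggingFace
# classes do not define -- the caller is expected to attach that fused
# dropout+add method before patching.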
|
16cab5c1db904939062fea82d6c2c204dc774a6e
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/ir/inference/test_trt_gather_nd_op.py
|
a6389756df915ab97cf2f38b85e7d67d9a8f115b
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 3,846
|
py
|
test_trt_gather_nd_op.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
from paddle.static import nn
class TRTGatherNdTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = paddle.static.data(
name="data", shape=[-1, 3, 4], dtype="float32"
)
index = paddle.static.data(
name="index", shape=[-1, 2, 2], dtype="int32"
)
gather_nd = paddle.gather_nd(data, index)
out = nn.batch_norm(gather_nd, is_test=True)
self.feeds = {
"data": np.random.random([2, 3, 4]).astype("float32"),
"index": np.array([[[0, 1], [1, 0]], [[1, 2], [0, 1]]]).astype(
"int32"
),
}
self.enable_trt = True
self.trt_parameters = TRTGatherNdTest.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False
)
self.fetch_list = [out]
self.dynamic_shape_params = TRTGatherNdTest.DynamicShapeParam(
{'data': [1, 3, 4], 'index': [1, 2, 2]},
{'data': [3, 3, 4], 'index': [3, 2, 2]},
{'data': [3, 3, 4], 'index': [3, 2, 2]},
False,
)
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
)
class TRTGatherNdFp16Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = paddle.static.data(
name="data", shape=[-1, 1280, 192], dtype="float32"
)
index = paddle.static.data(
name="index", shape=[-1, 1028, 2], dtype="int32"
)
gather_nd = paddle.gather_nd(data, index)
out = nn.batch_norm(gather_nd, is_test=True)
index_data = np.zeros((1, 1028, 2), dtype='int32')
self.feeds = {
"data": np.random.random([1, 1280, 192]).astype("float32"),
"index": index_data,
}
self.enable_trt = True
self.trt_parameters = TRTGatherNdFp16Test.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False
)
self.fetch_list = [out]
self.dynamic_shape_params = TRTGatherNdFp16Test.DynamicShapeParam(
{'data': [1, 1280, 192], 'index': [1, 1028, 2]},
{'data': [3, 1280, 192], 'index': [3, 1028, 2]},
{'data': [3, 1280, 192], 'index': [3, 1028, 2]},
False,
)
def test_check_output(self, atol=1e-3):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
)
if __name__ == "__main__":
unittest.main()
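# For reference, the gather_nd semantics exercised above (a plain-Python
# illustration of the first test's feed, not TensorRT-specific code): with
# data of shape [2, 3, 4] and index of shape [2, 2, 2], each length-2 index
# addresses the first two data dims, so the output has shape [2, 2, 4]:
#
# out[0, 0] = data[0, 1] # index[0][0] == [0, 1]
# out[0, 1] = data[1, 0] # index[0][1] == [1, 0]
# out[1, 0] = data[1, 2] # index[1][0] == [1, 2]
# out[1, 1] = data[0, 1] # index[1][1] == [0, 1]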
|
9f6a4edaff1d2aa24375f7f92e5aa25ad21dcea2
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CalibPPS/ESProducers/python/ctppsOpticalFunctions_cff.py
|
a114bb9ad8249fd5e0697d67e27e2acb69725c63
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
ctppsOpticalFunctions_cff.py
|
import FWCore.ParameterSet.Config as cms
from CalibPPS.ESProducers.ctppsLHCInfo_cff import *
# by default, (raw) optical functions are now loaded from CondDB using a GT
#from CalibPPS.ESProducers.ctppsOpticalFunctionsESSource_cfi import *
#
## add 2016 pre-TS2 configuration
#config_2016_preTS2 = cms.PSet(
# validityRange = cms.EventRange("273725:min - 280385:max"),
#
# opticalFunctions = cms.VPSet(
# cms.PSet( xangle = cms.double(185), fileName = cms.FileInPath("CalibPPS/ESProducers/data/optical_functions_2016.root") )
# ),
#
# scoringPlanes = cms.VPSet(
# # z in cm
# cms.PSet( rpId = cms.uint32(0x76100000), dirName = cms.string("XRPH_C6L5_B2"), z = cms.double(-20382.6) ), # RP 002, strip
# cms.PSet( rpId = cms.uint32(0x76180000), dirName = cms.string("XRPH_D6L5_B2"), z = cms.double(-21255.1) ), # RP 003, strip
# cms.PSet( rpId = cms.uint32(0x77100000), dirName = cms.string("XRPH_C6R5_B1"), z = cms.double(+20382.6) ), # RP 102, strip
# cms.PSet( rpId = cms.uint32(0x77180000), dirName = cms.string("XRPH_D6R5_B1"), z = cms.double(+21255.1) ), # RP 103, strip
# )
#)
#
#ctppsOpticalFunctionsESSource.configuration.append(config_2016_preTS2)
# optics interpolation between crossing angles
from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSource_cfi import *
ctppsInterpolatedOpticalFunctionsESSource.lhcInfoLabel = ctppsLHCInfoLabel
|
6fde79d880d2025985e443709a5e0e043f4713a5
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/collect-coins-in-a-tree.py
|
36af359520125ba6507f5c123a0921d4e885954a
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
collect-coins-in-a-tree.py
|
# Time: O(n)
# Space: O(n)
# tree, bfs
class Solution(object):
def collectTheCoins(self, coins, edges):
"""
:type coins: List[int]
:type edges: List[List[int]]
:rtype: int
"""
DISTANCE = 2
adj = [set() for _ in xrange(len(coins))]
for u, v in edges:
adj[u].add(v)
adj[v].add(u)
n = len(coins)
q = []
for u in xrange(len(coins)):
while len(adj[u]) == 1 and not coins[u]:
v = adj[u].pop()
adj[v].remove(u)
n -= 1
u = v
q = [u for u in xrange(len(coins)) if len(adj[u]) == 1]
for _ in xrange(DISTANCE):
new_q = []
for u in q:
if not adj[u]:
assert(n == 1)
break
v = adj[u].pop()
adj[v].remove(u)
n -= 1
if len(adj[v]) == 1:
new_q.append(v)
q = new_q
return (n-1)*2
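# Worked example (the problem's first sample): coins = [1, 0, 0, 0, 0, 1] and
# edges = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]] form a path with coins at
# both ends. No coinless leaves get trimmed, the two DISTANCE rounds peel
# nodes {0, 5} and then {1, 4}, leaving n = 2 nodes, so the answer is
# (2 - 1) * 2 = 2. Note the solution is written in Python 2 style (`xrange`).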
|
6afb61b6db57472525be08749ccea3bc70dd96aa
|
0db19410e9751790af8ce4a0a9332293e379c02f
|
/tests/test_models/test_backbones/test_resnet.py
|
4774f250c29165b5ffa494cbfa2b5956bd8f01b0
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpose
|
2c9986521d35eee35d822fb255e8e68486026d94
|
537bd8e543ab463fb55120d5caaa1ae22d6aaf06
|
refs/heads/main
| 2023-08-30T19:44:21.349410
| 2023-07-04T13:18:22
| 2023-07-04T13:18:22
| 278,003,645
| 4,037
| 1,171
|
Apache-2.0
| 2023-09-14T09:44:55
| 2020-07-08T06:02:55
|
Python
|
UTF-8
|
Python
| false
| false
| 23,537
|
py
|
test_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm
from mmpose.models.backbones import ResNet, ResNetV1d
from mmpose.models.backbones.resnet import (BasicBlock, Bottleneck, ResLayer,
get_expansion)
class TestResnet(TestCase):
@staticmethod
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck)):
return True
return False
@staticmethod
def all_zeros(modules):
"""Check if the weight(and bias) is all zero."""
weight_zero = torch.equal(modules.weight.data,
torch.zeros_like(modules.weight.data))
if hasattr(modules, 'bias'):
bias_zero = torch.equal(modules.bias.data,
torch.zeros_like(modules.bias.data))
else:
bias_zero = True
return weight_zero and bias_zero
@staticmethod
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
def test_get_expansion(self):
self.assertEqual(get_expansion(Bottleneck, 2), 2)
self.assertEqual(get_expansion(BasicBlock), 1)
self.assertEqual(get_expansion(Bottleneck), 4)
class MyResBlock(nn.Module):
expansion = 8
self.assertEqual(get_expansion(MyResBlock), 8)
# expansion must be an integer or None
with self.assertRaises(TypeError):
get_expansion(Bottleneck, '0')
# expansion is not specified and cannot be inferred
with self.assertRaises(TypeError):
class SomeModule(nn.Module):
pass
get_expansion(SomeModule)
def test_basic_block(self):
# expansion must be 1
with self.assertRaises(AssertionError):
BasicBlock(64, 64, expansion=2)
# BasicBlock with stride 1, out_channels == in_channels
block = BasicBlock(64, 64)
self.assertEqual(block.in_channels, 64)
self.assertEqual(block.mid_channels, 64)
self.assertEqual(block.out_channels, 64)
self.assertEqual(block.conv1.in_channels, 64)
self.assertEqual(block.conv1.out_channels, 64)
self.assertEqual(block.conv1.kernel_size, (3, 3))
self.assertEqual(block.conv1.stride, (1, 1))
self.assertEqual(block.conv2.in_channels, 64)
self.assertEqual(block.conv2.out_channels, 64)
self.assertEqual(block.conv2.kernel_size, (3, 3))
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56]))
# BasicBlock with stride 1 and downsample
downsample = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128))
block = BasicBlock(64, 128, downsample=downsample)
self.assertEqual(block.in_channels, 64)
self.assertEqual(block.mid_channels, 128)
self.assertEqual(block.out_channels, 128)
self.assertEqual(block.conv1.in_channels, 64)
self.assertEqual(block.conv1.out_channels, 128)
self.assertEqual(block.conv1.kernel_size, (3, 3))
self.assertEqual(block.conv1.stride, (1, 1))
self.assertEqual(block.conv2.in_channels, 128)
self.assertEqual(block.conv2.out_channels, 128)
self.assertEqual(block.conv2.kernel_size, (3, 3))
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
self.assertEqual(x_out.shape, torch.Size([1, 128, 56, 56]))
# BasicBlock with stride 2 and downsample
downsample = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(128))
block = BasicBlock(64, 128, stride=2, downsample=downsample)
self.assertEqual(block.in_channels, 64)
self.assertEqual(block.mid_channels, 128)
self.assertEqual(block.out_channels, 128)
self.assertEqual(block.conv1.in_channels, 64)
self.assertEqual(block.conv1.out_channels, 128)
self.assertEqual(block.conv1.kernel_size, (3, 3))
self.assertEqual(block.conv1.stride, (2, 2))
self.assertEqual(block.conv2.in_channels, 128)
self.assertEqual(block.conv2.out_channels, 128)
self.assertEqual(block.conv2.kernel_size, (3, 3))
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
self.assertEqual(x_out.shape, torch.Size([1, 128, 28, 28]))
# forward with checkpointing
block = BasicBlock(64, 64, with_cp=True)
self.assertTrue(block.with_cp)
x = torch.randn(1, 64, 56, 56, requires_grad=True)
x_out = block(x)
self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56]))
def test_bottleneck(self):
# style must be in ['pytorch', 'caffe']
with self.assertRaises(AssertionError):
Bottleneck(64, 64, style='tensorflow')
# expansion must be divisible by out_channels
with self.assertRaises(AssertionError):
Bottleneck(64, 64, expansion=3)
# Test Bottleneck style
block = Bottleneck(64, 64, stride=2, style='pytorch')
self.assertEqual(block.conv1.stride, (1, 1))
self.assertEqual(block.conv2.stride, (2, 2))
block = Bottleneck(64, 64, stride=2, style='caffe')
self.assertEqual(block.conv1.stride, (2, 2))
self.assertEqual(block.conv2.stride, (1, 1))
# Bottleneck with stride 1
block = Bottleneck(64, 64, style='pytorch')
self.assertEqual(block.in_channels, 64)
self.assertEqual(block.mid_channels, 16)
self.assertEqual(block.out_channels, 64)
self.assertEqual(block.conv1.in_channels, 64)
self.assertEqual(block.conv1.out_channels, 16)
self.assertEqual(block.conv1.kernel_size, (1, 1))
self.assertEqual(block.conv2.in_channels, 16)
self.assertEqual(block.conv2.out_channels, 16)
self.assertEqual(block.conv2.kernel_size, (3, 3))
self.assertEqual(block.conv3.in_channels, 16)
self.assertEqual(block.conv3.out_channels, 64)
self.assertEqual(block.conv3.kernel_size, (1, 1))
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
self.assertEqual(x_out.shape, (1, 64, 56, 56))
# Bottleneck with stride 1 and downsample
downsample = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128))
block = Bottleneck(64, 128, style='pytorch', downsample=downsample)
self.assertEqual(block.in_channels, 64)
self.assertEqual(block.mid_channels, 32)
self.assertEqual(block.out_channels, 128)
self.assertEqual(block.conv1.in_channels, 64)
self.assertEqual(block.conv1.out_channels, 32)
self.assertEqual(block.conv1.kernel_size, (1, 1))
self.assertEqual(block.conv2.in_channels, 32)
self.assertEqual(block.conv2.out_channels, 32)
self.assertEqual(block.conv2.kernel_size, (3, 3))
self.assertEqual(block.conv3.in_channels, 32)
self.assertEqual(block.conv3.out_channels, 128)
self.assertEqual(block.conv3.kernel_size, (1, 1))
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
self.assertEqual(x_out.shape, (1, 128, 56, 56))
# Bottleneck with stride 2 and downsample
downsample = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128))
block = Bottleneck(
64, 128, stride=2, style='pytorch', downsample=downsample)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
self.assertEqual(x_out.shape, (1, 128, 28, 28))
# Bottleneck with expansion 2
block = Bottleneck(64, 64, style='pytorch', expansion=2)
self.assertEqual(block.in_channels, 64)
self.assertEqual(block.mid_channels, 32)
self.assertEqual(block.out_channels, 64)
self.assertEqual(block.conv1.in_channels, 64)
self.assertEqual(block.conv1.out_channels, 32)
self.assertEqual(block.conv1.kernel_size, (1, 1))
self.assertEqual(block.conv2.in_channels, 32)
self.assertEqual(block.conv2.out_channels, 32)
self.assertEqual(block.conv2.kernel_size, (3, 3))
self.assertEqual(block.conv3.in_channels, 32)
self.assertEqual(block.conv3.out_channels, 64)
self.assertEqual(block.conv3.kernel_size, (1, 1))
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
self.assertEqual(x_out.shape, (1, 64, 56, 56))
# Test Bottleneck with checkpointing
block = Bottleneck(64, 64, with_cp=True)
block.train()
self.assertTrue(block.with_cp)
x = torch.randn(1, 64, 56, 56, requires_grad=True)
x_out = block(x)
self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56]))
def test_basicblock_reslayer(self):
# 3 BasicBlock w/o downsample
layer = ResLayer(BasicBlock, 3, 32, 32)
self.assertEqual(len(layer), 3)
for i in range(3):
self.assertEqual(layer[i].in_channels, 32)
self.assertEqual(layer[i].out_channels, 32)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 32, 56, 56))
# 3 BasicBlock w/ stride 1 and downsample
layer = ResLayer(BasicBlock, 3, 32, 64)
self.assertEqual(len(layer), 3)
self.assertEqual(layer[0].in_channels, 32)
self.assertEqual(layer[0].out_channels, 64)
self.assertEqual(
layer[0].downsample is not None and len(layer[0].downsample), 2)
self.assertIsInstance(layer[0].downsample[0], nn.Conv2d)
self.assertEqual(layer[0].downsample[0].stride, (1, 1))
for i in range(1, 3):
self.assertEqual(layer[i].in_channels, 64)
self.assertEqual(layer[i].out_channels, 64)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 64, 56, 56))
# 3 BasicBlock w/ stride 2 and downsample
layer = ResLayer(BasicBlock, 3, 32, 64, stride=2)
self.assertEqual(len(layer), 3)
self.assertEqual(layer[0].in_channels, 32)
self.assertEqual(layer[0].out_channels, 64)
self.assertEqual(layer[0].stride, 2)
self.assertEqual(
layer[0].downsample is not None and len(layer[0].downsample), 2)
self.assertIsInstance(layer[0].downsample[0], nn.Conv2d)
self.assertEqual(layer[0].downsample[0].stride, (2, 2))
for i in range(1, 3):
self.assertEqual(layer[i].in_channels, 64)
self.assertEqual(layer[i].out_channels, 64)
self.assertEqual(layer[i].stride, 1)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 64, 28, 28))
# 3 BasicBlock w/ stride 2 and downsample with avg pool
layer = ResLayer(BasicBlock, 3, 32, 64, stride=2, avg_down=True)
self.assertEqual(len(layer), 3)
self.assertEqual(layer[0].in_channels, 32)
self.assertEqual(layer[0].out_channels, 64)
self.assertEqual(layer[0].stride, 2)
self.assertEqual(
layer[0].downsample is not None and len(layer[0].downsample), 3)
self.assertIsInstance(layer[0].downsample[0], nn.AvgPool2d)
self.assertEqual(layer[0].downsample[0].stride, 2)
for i in range(1, 3):
self.assertEqual(layer[i].in_channels, 64)
self.assertEqual(layer[i].out_channels, 64)
self.assertEqual(layer[i].stride, 1)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 64, 28, 28))
def test_bottleneck_reslayer(self):
# 3 Bottleneck w/o downsample
layer = ResLayer(Bottleneck, 3, 32, 32)
self.assertEqual(len(layer), 3)
for i in range(3):
self.assertEqual(layer[i].in_channels, 32)
self.assertEqual(layer[i].out_channels, 32)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 32, 56, 56))
# 3 Bottleneck w/ stride 1 and downsample
layer = ResLayer(Bottleneck, 3, 32, 64)
self.assertEqual(len(layer), 3)
self.assertEqual(layer[0].in_channels, 32)
self.assertEqual(layer[0].out_channels, 64)
self.assertEqual(layer[0].stride, 1)
self.assertEqual(layer[0].conv1.out_channels, 16)
self.assertEqual(
layer[0].downsample is not None and len(layer[0].downsample), 2)
self.assertIsInstance(layer[0].downsample[0], nn.Conv2d)
self.assertEqual(layer[0].downsample[0].stride, (1, 1))
for i in range(1, 3):
self.assertEqual(layer[i].in_channels, 64)
self.assertEqual(layer[i].out_channels, 64)
self.assertEqual(layer[i].conv1.out_channels, 16)
self.assertEqual(layer[i].stride, 1)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 64, 56, 56))
# 3 Bottleneck w/ stride 2 and downsample
layer = ResLayer(Bottleneck, 3, 32, 64, stride=2)
self.assertEqual(len(layer), 3)
self.assertEqual(layer[0].in_channels, 32)
self.assertEqual(layer[0].out_channels, 64)
self.assertEqual(layer[0].stride, 2)
self.assertEqual(layer[0].conv1.out_channels, 16)
self.assertEqual(
layer[0].downsample is not None and len(layer[0].downsample), 2)
self.assertIsInstance(layer[0].downsample[0], nn.Conv2d)
self.assertEqual(layer[0].downsample[0].stride, (2, 2))
for i in range(1, 3):
self.assertEqual(layer[i].in_channels, 64)
self.assertEqual(layer[i].out_channels, 64)
self.assertEqual(layer[i].conv1.out_channels, 16)
self.assertEqual(layer[i].stride, 1)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 64, 28, 28))
# 3 Bottleneck w/ stride 2 and downsample with avg pool
layer = ResLayer(Bottleneck, 3, 32, 64, stride=2, avg_down=True)
self.assertEqual(len(layer), 3)
self.assertEqual(layer[0].in_channels, 32)
self.assertEqual(layer[0].out_channels, 64)
self.assertEqual(layer[0].stride, 2)
self.assertEqual(layer[0].conv1.out_channels, 16)
self.assertEqual(
layer[0].downsample is not None and len(layer[0].downsample), 3)
self.assertIsInstance(layer[0].downsample[0], nn.AvgPool2d)
self.assertEqual(layer[0].downsample[0].stride, 2)
for i in range(1, 3):
self.assertEqual(layer[i].in_channels, 64)
self.assertEqual(layer[i].out_channels, 64)
self.assertEqual(layer[i].conv1.out_channels, 16)
self.assertEqual(layer[i].stride, 1)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 64, 28, 28))
# 3 Bottleneck with custom expansion
layer = ResLayer(Bottleneck, 3, 32, 32, expansion=2)
self.assertEqual(len(layer), 3)
for i in range(3):
self.assertEqual(layer[i].in_channels, 32)
self.assertEqual(layer[i].out_channels, 32)
self.assertEqual(layer[i].stride, 1)
self.assertEqual(layer[i].conv1.out_channels, 16)
self.assertIsNone(layer[i].downsample)
x = torch.randn(1, 32, 56, 56)
x_out = layer(x)
self.assertEqual(x_out.shape, (1, 32, 56, 56))
def test_resnet(self):
"""Test resnet backbone."""
with self.assertRaises(KeyError):
# ResNet depth should be in [18, 34, 50, 101, 152]
ResNet(20)
with self.assertRaises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=0)
with self.assertRaises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=5)
with self.assertRaises(AssertionError):
# len(strides) == len(dilations) == num_stages
ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
with self.assertRaises(AssertionError):
# Style must be in ['pytorch', 'caffe']
ResNet(50, style='tensorflow')
# Test ResNet50 norm_eval=True
model = ResNet(50, norm_eval=True)
model.init_weights()
model.train()
self.assertTrue(self.check_norm_state(model.modules(), False))
# Test ResNet50 with torchvision pretrained weight
init_cfg = dict(type='Pretrained', checkpoint='torchvision://resnet50')
model = ResNet(depth=50, norm_eval=True, init_cfg=init_cfg)
model.train()
self.assertTrue(self.check_norm_state(model.modules(), False))
# Test ResNet50 with first stage frozen
frozen_stages = 1
model = ResNet(50, frozen_stages=frozen_stages)
model.init_weights()
model.train()
self.assertFalse(model.norm1.training)
for layer in [model.conv1, model.norm1]:
for param in layer.parameters():
self.assertFalse(param.requires_grad)
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
self.assertFalse(mod.training)
for param in layer.parameters():
self.assertFalse(param.requires_grad)
# Test ResNet18 forward
model = ResNet(18, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
self.assertEqual(len(feat), 4)
self.assertEqual(feat[0].shape, (1, 64, 56, 56))
self.assertEqual(feat[1].shape, (1, 128, 28, 28))
self.assertEqual(feat[2].shape, (1, 256, 14, 14))
self.assertEqual(feat[3].shape, (1, 512, 7, 7))
# Test ResNet50 with BatchNorm forward
model = ResNet(50, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
self.assertEqual(len(feat), 4)
self.assertEqual(feat[0].shape, (1, 256, 56, 56))
self.assertEqual(feat[1].shape, (1, 512, 28, 28))
self.assertEqual(feat[2].shape, (1, 1024, 14, 14))
self.assertEqual(feat[3].shape, (1, 2048, 7, 7))
# Test ResNet50 with layers 1, 2, 3 out forward
model = ResNet(50, out_indices=(0, 1, 2))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
self.assertEqual(len(feat), 3)
self.assertEqual(feat[0].shape, (1, 256, 56, 56))
self.assertEqual(feat[1].shape, (1, 512, 28, 28))
self.assertEqual(feat[2].shape, (1, 1024, 14, 14))
        # Test ResNet50 with layer 3 (top feature map) output forward
model = ResNet(50, out_indices=(3, ))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
self.assertEqual(len(feat), 1)
self.assertEqual(feat[-1].shape, (1, 2048, 7, 7))
# Test ResNet50 with checkpoint forward
model = ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True)
for m in model.modules():
if self.is_block(m):
self.assertTrue(m.with_cp)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
self.assertEqual(len(feat), 4)
self.assertEqual(feat[0].shape, (1, 256, 56, 56))
self.assertEqual(feat[1].shape, (1, 512, 28, 28))
self.assertEqual(feat[2].shape, (1, 1024, 14, 14))
self.assertEqual(feat[3].shape, (1, 2048, 7, 7))
# zero initialization of residual blocks
model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True)
model.init_weights()
for m in model.modules():
if isinstance(m, Bottleneck):
self.assertTrue(self.all_zeros(m.norm3))
elif isinstance(m, BasicBlock):
self.assertTrue(self.all_zeros(m.norm2))
# non-zero initialization of residual blocks
model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=False)
model.init_weights()
for m in model.modules():
if isinstance(m, Bottleneck):
self.assertFalse(self.all_zeros(m.norm3))
elif isinstance(m, BasicBlock):
self.assertFalse(self.all_zeros(m.norm2))
def test_resnet_v1d(self):
model = ResNetV1d(depth=50, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
self.assertEqual(len(model.stem), 3)
for i in range(3):
self.assertIsInstance(model.stem[i], ConvModule)
imgs = torch.randn(1, 3, 224, 224)
feat = model.stem(imgs)
self.assertEqual(feat.shape, (1, 64, 112, 112))
feat = model(imgs)
self.assertEqual(len(feat), 4)
self.assertEqual(feat[0].shape, (1, 256, 56, 56))
self.assertEqual(feat[1].shape, (1, 512, 28, 28))
self.assertEqual(feat[2].shape, (1, 1024, 14, 14))
self.assertEqual(feat[3].shape, (1, 2048, 7, 7))
# Test ResNet50V1d with first stage frozen
frozen_stages = 1
model = ResNetV1d(depth=50, frozen_stages=frozen_stages)
self.assertEqual(len(model.stem), 3)
for i in range(3):
self.assertIsInstance(model.stem[i], ConvModule)
model.init_weights()
model.train()
self.assertTrue(self.check_norm_state(model.stem, False))
for param in model.stem.parameters():
self.assertFalse(param.requires_grad)
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
self.assertFalse(mod.training)
for param in layer.parameters():
self.assertFalse(param.requires_grad)
def test_resnet_half_channel(self):
model = ResNet(50, base_channels=32, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
self.assertEqual(len(feat), 4)
self.assertEqual(feat[0].shape, (1, 128, 56, 56))
self.assertEqual(feat[1].shape, (1, 256, 28, 28))
self.assertEqual(feat[2].shape, (1, 512, 14, 14))
self.assertEqual(feat[3].shape, (1, 1024, 7, 7))
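# For reference, the channel counts asserted above follow from
# base_channels * 2**i * expansion for stage i: with Bottleneck expansion 4
# and the default base_channels=64 this gives (256, 512, 1024, 2048), and
# with base_channels=32 it gives (128, 256, 512, 1024), matching
# test_resnet_half_channel.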
|
3c1b9cee1a561e64be5a8b96cf30b1068d4cbd48
|
4a5681a81c8720087291bcf6caacef083cb7af16
|
/src/python/strelka/tests/test_scan_vhd.py
|
f63c57e18b70c345def9cdf35418a522eb462c73
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
target/strelka
|
8d0801dbca270c46bd2ea998c0cf72f80e741cee
|
e42800362535a8fec956689e4de81235231022db
|
refs/heads/master
| 2023-08-18T15:35:01.150070
| 2023-08-15T12:05:07
| 2023-08-15T12:05:07
| 149,654,117
| 739
| 114
|
NOASSERTION
| 2023-09-12T18:18:25
| 2018-09-20T18:38:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,901
|
py
|
test_scan_vhd.py
|
from pathlib import Path
from unittest import TestCase, mock
from strelka.scanners.scan_vhd import ScanVhd as ScanUnderTest
from strelka.tests import run_test_scan
def test_scan_vhd(mocker):
"""
Pass: Sample event matches output of scanner.
Failure: Unable to load file or sample event fails to match.
"""
test_scan_event = {
"elapsed": mock.ANY,
"flags": [],
"total": {"files": 3, "extracted": 3},
"files": [
{
"filename": "System Volume Information/WPSettings.dat",
"size": "12",
"datetime": mock.ANY,
},
{
"filename": "lorem.txt",
"size": "4015",
"datetime": mock.ANY,
},
{
"filename": "$RECYCLE.BIN/S-1-5-21-3712961497-200595429-3248382696-1000/desktop.ini",
"size": "129",
"datetime": mock.ANY,
},
],
"hidden_dirs": [
"System Volume Information",
"$RECYCLE.BIN",
"$RECYCLE.BIN/S-1-5-21-3712961497-200595429-3248382696-1000",
],
"meta": {
"7zip_version": "22.01",
"partitions": [
{"path": mock.ANY, "type": "GPT"},
{"path": "0.Basic data partition.ntfs", "file_system": "Windows BDP"},
{
"path": "0.Basic data partition.ntfs",
"type": "NTFS",
"label": "New Volume",
"file_system": "NTFS 3.1",
"created": mock.ANY,
},
],
},
}
scanner_event = run_test_scan(
mocker=mocker,
scan_class=ScanUnderTest,
fixture_path=Path(__file__).parent / "fixtures/test.vhd",
)
TestCase.maxDiff = None
TestCase().assertDictEqual(test_scan_event, scanner_event)
def test_scan_vhdx(mocker):
"""
Pass: Sample event matches output of scanner.
Failure: Unable to load file or sample event fails to match.
"""
test_scan_event = {
"elapsed": mock.ANY,
"flags": [],
"total": {"files": 3, "extracted": 3},
"files": [
{
"filename": "System Volume Information/WPSettings.dat",
"size": "12",
"datetime": mock.ANY,
},
{
"filename": "lorem.txt",
"size": "4015",
"datetime": mock.ANY,
},
{
"filename": "$RECYCLE.BIN/S-1-5-21-3712961497-200595429-3248382696-1000/desktop.ini",
"size": "129",
"datetime": mock.ANY,
},
],
"hidden_dirs": [
"System Volume Information",
"$RECYCLE.BIN",
"$RECYCLE.BIN/S-1-5-21-3712961497-200595429-3248382696-1000",
],
"meta": {
"7zip_version": "22.01",
"partitions": [
{
"path": mock.ANY,
"type": "VHDX",
"creator_application": "Microsoft Windows 10.0.19044.0",
},
{"path": mock.ANY, "type": "GPT"},
{"path": "0.Basic data partition.ntfs", "file_system": "Windows BDP"},
{
"path": "0.Basic data partition.ntfs",
"type": "NTFS",
"label": "New Volume",
"file_system": "NTFS 3.1",
"created": mock.ANY,
},
],
},
}
scanner_event = run_test_scan(
mocker=mocker,
scan_class=ScanUnderTest,
fixture_path=Path(__file__).parent / "fixtures/test.vhdx",
)
TestCase.maxDiff = None
TestCase().assertDictEqual(test_scan_event, scanner_event)
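# Note on the expected events above: mock.ANY compares equal to any value,
# which is what lets volatile fields such as "elapsed" and "datetime" pass
# assertDictEqual. A minimal illustration:
#
# from unittest import mock
# assert mock.ANY == "2023-01-01T00:00:00"
# assert {"elapsed": mock.ANY} == {"elapsed": 0.123}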
|
10a44a5111cd4aed9d692442c832a3fc22c030d5
|
03380a2cf46385b0d971e150078d992cd7840980
|
/.github/workflows/source/ci_matrix.py
|
79f4d878c5d8f535481f26d4b0df3fc430f0a084
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause-LBNL"
] |
permissive
|
ECP-WarpX/WarpX
|
0cfc85ce306cc5d5caa3108e8e9aefe4fda44cd3
|
e052c6d1994e4edb66fc27763d72fcf08c3a112a
|
refs/heads/development
| 2023-08-19T07:50:12.539926
| 2023-08-17T20:01:41
| 2023-08-17T20:01:41
| 150,626,842
| 210
| 147
|
NOASSERTION
| 2023-09-13T23:38:13
| 2018-09-27T17:53:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
ci_matrix.py
|
#!/usr/bin/env python
# Concatenation of tests in each of the 6 elements in CI matrix
with open('./ci_matrix_elements.txt') as f:
    matrix_elements = f.readlines()
# All tests read by prepare_file_ci.py
with open('./ci_all_tests.txt') as f:
    all_tests = f.readlines()
# Now let's make sure these two are equal
# Remove these elements from both lists, as they are not test names
elements_to_remove = ['[main]\n', '[AMReX]\n', '[source]\n', '[extra-PICSAR]\n']
for element in elements_to_remove:
for x in range(matrix_elements.count(element)):
matrix_elements.remove(element)
for x in range(all_tests.count(element)):
all_tests.remove(element)
# Sort lists, and make sure they are equal
matrix_elements.sort()
all_tests.sort()
print("Tests in matrix, but not in initial list (typically if a test is done twice):")
print(list(set(matrix_elements) - set(all_tests)))
print("Tests in initial list but not in the matrix:")
print(list(set(all_tests) - set(matrix_elements)))
assert( matrix_elements == all_tests )
|
e6104453fecfe933a0b51e333237071018d58530
|
ced3f2cdcfe770f2d018a4837c42326fb6e0d27d
|
/examples/graph/test_dice.py
|
d468609d12fb85747a7547864e5e79bf87442de2
|
[
"MIT",
"CC0-1.0",
"GPL-1.0-or-later"
] |
permissive
|
DSE-MSU/DeepRobust
|
308a4c03887eb1394a6d68b64ac3d7837b32f395
|
d25d95b33724af9ab0385d5171c989f9b4ff2359
|
refs/heads/master
| 2023-08-11T00:42:08.091214
| 2023-06-29T13:24:19
| 2023-06-29T13:24:19
| 210,014,892
| 978
| 200
|
MIT
| 2023-09-11T02:56:14
| 2019-09-21T16:09:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
test_dice.py
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from deeprobust.graph.defense import GCN
from deeprobust.graph.global_attack import DICE
from deeprobust.graph.utils import *
from deeprobust.graph.data import Dataset
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--dataset', type=str, default='citeseer', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
parser.add_argument('--ptb_rate', type=float, default=0.05, help='perturbation rate')
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
print('cuda: %s' % args.cuda)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
data = Dataset(root='/tmp/', name=args.dataset)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
idx_unlabeled = np.union1d(idx_val, idx_test)
# Setup Attack Model
model = DICE()
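# adj is a symmetric adjacency matrix, so adj.sum() // 2 counts undirected
# edges; the attack budget below is ptb_rate of that edge count.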
n_perturbations = int(args.ptb_rate * (adj.sum()//2))
model.attack(adj, labels, n_perturbations)
modified_adj = model.modified_adj
adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True, device=device)
modified_adj = normalize_adj(modified_adj)
modified_adj = sparse_mx_to_torch_sparse_tensor(modified_adj)
modified_adj = modified_adj.to(device)
def test(adj):
''' test on GCN '''
# adj = normalize_adj_tensor(adj)
gcn = GCN(nfeat=features.shape[1],
nhid=16,
nclass=labels.max().item() + 1,
dropout=0.5, device=device)
gcn = gcn.to(device)
optimizer = optim.Adam(gcn.parameters(),
lr=0.01, weight_decay=5e-4)
gcn.fit(features, adj, labels, idx_train) # train without model picking
# gcn.fit(features, adj, labels, idx_train, idx_val) # train with validation model picking
output = gcn.output
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
return acc_test.item()
def main():
print('=== testing GCN on original(clean) graph ===')
test(adj)
print('=== testing GCN on perturbed graph ===')
test(modified_adj)
if __name__ == '__main__':
main()
|
f85e0d95e3d0f43dca4226baceb1e703f8a3effc
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/examples/research_projects/distillation/scripts/binarized_data.py
|
951530d5c75aa6a8c52d880d820b7d1eae6037a5
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,660
|
py
|
binarized_data.py
|
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before distillation.
"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
)
parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
args = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
if args.tokenizer_type == "bert":
tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
bos = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
sep = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
bos = tokenizer.special_tokens_map["cls_token"] # `<s>`
sep = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
bos = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
sep = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}")
with open(args.file_path, "r", encoding="utf8") as fp:
data = fp.readlines()
logger.info("Start encoding")
logger.info(f"{len(data)} examples to process.")
rslt = []
iter = 0
interval = 10000
start = time.time()
for text in data:
text = f"{bos} {text.strip()} {sep}"
token_ids = tokenizer.encode(text, add_special_tokens=False)
rslt.append(token_ids)
iter += 1
if iter % interval == 0:
end = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
start = time.time()
logger.info("Finished binarization")
logger.info(f"{len(data)} examples processed.")
dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
vocab_size = tokenizer.vocab_size
if vocab_size < (1 << 16):
rslt_ = [np.uint16(d) for d in rslt]
else:
rslt_ = [np.int32(d) for d in rslt]
random.shuffle(rslt_)
logger.info(f"Dump to {dp_file}")
with open(dp_file, "wb") as handle:
pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
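# Example invocation (paths are illustrative; flags follow the argparse
# defaults above):
#
# python binarized_data.py --file_path data/dump.txt \
#     --tokenizer_type bert --tokenizer_name bert-base-uncased \
#     --dump_file data/binarized_text
#
# which writes data/binarized_text.bert-base-uncased.pickle per the naming
# scheme in main().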
|
dfa9213abc7a8077d5ad4480f4cd6e03ca8babeb
|
6c628b7b72eef4dbcc982803eb18c20a01d50a25
|
/brownie/project/compiler/solidity.py
|
85a55b06213c823f39f792d1d638b700ac26eb98
|
[
"MIT"
] |
permissive
|
eth-brownie/brownie
|
174c5cb549427f4814fa5a1dc9ede225acc983f8
|
bc7b511583060fdaff1d4b5269aedcc1cb710bc6
|
refs/heads/master
| 2023-09-04T15:53:39.804726
| 2023-06-12T07:27:29
| 2023-06-12T07:27:29
| 155,913,585
| 2,408
| 518
|
MIT
| 2023-09-06T14:20:17
| 2018-11-02T19:39:26
|
Python
|
UTF-8
|
Python
| false
| false
| 27,819
|
py
|
solidity.py
|
#!/usr/bin/python3
import logging
from collections import deque
from hashlib import sha1
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import solcast
import solcx
from requests.exceptions import ConnectionError
from semantic_version import Version
from solcast.nodes import NodeBase, is_inside_offset
from brownie._config import EVM_EQUIVALENTS
from brownie.exceptions import CompilerError, IncompatibleSolcVersion
from brownie.project.compiler.utils import _get_alias, expand_source_map
from . import sources
solcx_logger = logging.getLogger("solcx")
solcx_logger.setLevel(10)
sh = logging.StreamHandler()
sh.setLevel(10)
sh.setFormatter(logging.Formatter("%(message)s"))
solcx_logger.addHandler(sh)
AVAILABLE_SOLC_VERSIONS = None
EVM_VERSION_MAPPING = [
("istanbul", Version("0.5.13")),
("petersburg", Version("0.5.5")),
("byzantium", Version("0.4.0")),
]
# error codes used in Solidity >=0.8.0
# docs.soliditylang.org/en/v0.8.0/control-structures.html#panic-via-assert-and-error-via-require
SOLIDITY_ERROR_CODES = {
1: "Failed assertion",
17: "Integer overflow",
18: "Division or modulo by zero",
33: "Conversion to enum out of bounds",
24: "Access to storage byte array that is incorrectly encoded",
49: "Pop from empty array",
50: "Index out of range",
65: "Attempted to allocate too much memory",
81: "Call to zero-initialized variable of internal function type",
}
def get_version() -> Version:
return solcx.get_solc_version(with_commit_hash=True)
def compile_from_input_json(
input_json: Dict, silent: bool = True, allow_paths: Optional[str] = None
) -> Dict:
"""
Compiles contracts from a standard input json.
Args:
input_json: solc input json
silent: verbose reporting
allow_paths: compiler allowed filesystem import path
Returns: standard compiler output json
"""
optimizer = input_json["settings"]["optimizer"]
input_json["settings"].setdefault("evmVersion", None)
if input_json["settings"]["evmVersion"] in EVM_EQUIVALENTS:
input_json["settings"]["evmVersion"] = EVM_EQUIVALENTS[input_json["settings"]["evmVersion"]]
if not silent:
print(f"Compiling contracts...\n Solc version: {str(solcx.get_solc_version())}")
opt = f"Enabled Runs: {optimizer['runs']}" if optimizer["enabled"] else "Disabled"
print(f" Optimizer: {opt}")
if input_json["settings"]["evmVersion"]:
print(f" EVM Version: {input_json['settings']['evmVersion'].capitalize()}")
try:
return solcx.compile_standard(input_json, allow_paths=allow_paths)
except solcx.exceptions.SolcError as e:
raise CompilerError(e, "solc")
def set_solc_version(version: Union[str, Version]) -> str:
"""Sets the solc version. If not available it will be installed."""
if not isinstance(version, Version):
version = Version(version.lstrip("v"))
if version < Version("0.4.22"):
raise IncompatibleSolcVersion("Brownie only supports Solidity versions >=0.4.22")
try:
solcx.set_solc_version(version, silent=True)
except solcx.exceptions.SolcNotInstalled:
if version not in _get_solc_version_list()[0]:
raise IncompatibleSolcVersion(
f"Cannot install Solidity v{version} on this OS. You may be able to "
f"manually compile from source with `solcx.compile_solc('{version}')`"
)
install_solc(version)
solcx.set_solc_version(version, silent=True)
return str(solcx.get_solc_version())
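# e.g. set_solc_version("0.8.19") or set_solc_version("v0.8.19") -- the
# leading "v" is stripped, and the compiler is installed on demand when the
# requested version is installable on this OS.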
def install_solc(*versions: Union[Version, str]) -> None:
"""Installs solc versions."""
for version in versions:
solcx.install_solc(version, show_progress=True)
def get_abi(contract_source: str, allow_paths: Optional[str] = None) -> Dict:
"""
Given a contract source, returns a dict of {name: abi}
This function is deprecated in favor of `brownie.project.compiler.get_abi`
"""
version = find_best_solc_version({"<stdin>": contract_source})
set_solc_version(version)
compiled = solcx.compile_source(
contract_source, allow_empty=True, allow_paths=allow_paths, output_values=["abi"]
)
return {k.rsplit(":")[-1]: v["abi"] for k, v in compiled.items()}
def find_solc_versions(
contract_sources: Dict[str, str],
install_needed: bool = False,
install_latest: bool = False,
silent: bool = True,
) -> Dict:
"""
Analyzes contract pragmas and determines which solc version(s) to use.
Args:
contract_sources: a dictionary in the form of {'path': "source code"}
install_needed: if True, will install when no installed version matches
the contract pragma
install_latest: if True, will install when a newer version is available
than the installed one
silent: set to False to enable verbose reporting
Returns: dictionary of {'version': ['path', 'path', ..]}
"""
available_versions, installed_versions = _get_solc_version_list()
pragma_specs: Dict = {}
to_install = set()
new_versions = set()
for path, source in contract_sources.items():
pragma_specs[path] = sources.get_pragma_spec(source, path)
version = pragma_specs[path].select(installed_versions)
if not version and not (install_needed or install_latest):
raise IncompatibleSolcVersion(
f"No installed solc version matching '{pragma_specs[path]}' in '{path}'"
)
# if no installed version of solc matches the pragma, find the latest available version
latest = pragma_specs[path].select(available_versions)
if not version and not latest:
raise IncompatibleSolcVersion(
f"No installable solc version matching '{pragma_specs[path]}' in '{path}'"
)
if not version or (install_latest and latest > version):
to_install.add(latest)
elif latest and latest > version:
new_versions.add(str(version))
# install new versions if needed
if to_install:
install_solc(*to_install)
installed_versions = solcx.get_installed_solc_versions()
elif new_versions and not silent:
print(
f"New compatible solc version{'s' if len(new_versions) > 1 else ''}"
f" available: {', '.join(new_versions)}"
)
# organize source paths by latest available solc version
compiler_versions: Dict = {}
for path, spec in pragma_specs.items():
version = spec.select(installed_versions)
compiler_versions.setdefault(str(version), []).append(path)
return compiler_versions
def find_best_solc_version(
contract_sources: Dict[str, str],
install_needed: bool = False,
install_latest: bool = False,
silent: bool = True,
) -> str:
"""
Analyzes contract pragmas and finds the best version compatible with all sources.
Args:
contract_sources: a dictionary in the form of {'path': "source code"}
install_needed: if True, will install when no installed version matches
the contract pragma
install_latest: if True, will install when a newer version is available
than the installed one
silent: set to False to enable verbose reporting
Returns: version string
"""
available_versions, installed_versions = _get_solc_version_list()
for path, source in contract_sources.items():
pragma_spec = sources.get_pragma_spec(source, path)
installed_versions = [i for i in installed_versions if i in pragma_spec]
available_versions = [i for i in available_versions if i in pragma_spec]
if not available_versions:
raise IncompatibleSolcVersion("No installable solc version compatible across all sources")
if not installed_versions and not (install_needed or install_latest):
raise IncompatibleSolcVersion("No installed solc version compatible across all sources")
if max(available_versions) > max(installed_versions, default=Version("0.0.0")):
if install_latest or (install_needed and not installed_versions):
install_solc(max(available_versions))
return str(max(available_versions))
if not silent:
print(f"New compatible solc version available: {max(available_versions)}")
return str(max(installed_versions))
def _get_solc_version_list() -> Tuple[List, List]:
global AVAILABLE_SOLC_VERSIONS
installed_versions = solcx.get_installed_solc_versions()
if AVAILABLE_SOLC_VERSIONS is None:
try:
AVAILABLE_SOLC_VERSIONS = solcx.get_installable_solc_versions()
except ConnectionError:
if not installed_versions:
raise ConnectionError("Solc not installed and cannot connect to GitHub")
AVAILABLE_SOLC_VERSIONS = installed_versions
return AVAILABLE_SOLC_VERSIONS, installed_versions
def _get_unique_build_json(
output_evm: Dict, contract_node: Any, stmt_nodes: Dict, branch_nodes: Dict, has_fallback: bool
) -> Dict:
paths = {
str(i.contract_id): i.parent().absolutePath
for i in [contract_node] + contract_node.dependencies
}
bytecode = _format_link_references(output_evm)
without_metadata = _remove_metadata(output_evm["deployedBytecode"]["object"])
instruction_count = len(without_metadata) // 2
pc_map, statement_map, branch_map = _generate_coverage_data(
output_evm["deployedBytecode"]["sourceMap"],
output_evm["deployedBytecode"]["opcodes"],
contract_node,
stmt_nodes,
branch_nodes,
has_fallback,
instruction_count,
)
dependencies = []
for node in [i for i in contract_node.dependencies if i.nodeType == "ContractDefinition"]:
# use contract aliases when recording dependencies, to avoid
# potential namespace collisions when importing across projects
name = node.name
path_str = node.parent().absolutePath
dependencies.append(_get_alias(name, path_str))
return {
"allSourcePaths": paths,
"bytecode": bytecode,
"bytecodeSha1": sha1(_remove_metadata(bytecode).encode()).hexdigest(),
"coverageMap": {"statements": statement_map, "branches": branch_map},
"dependencies": dependencies,
"offset": contract_node.offset,
"pcMap": pc_map,
"type": contract_node.contractKind,
}
def _format_link_references(evm: Dict) -> str:
# Standardizes formatting for unlinked libraries within bytecode
bytecode = evm["bytecode"]["object"]
references = [
(k, x) for v in evm["bytecode"].get("linkReferences", {}).values() for k, x in v.items()
]
for n, loc in [(i[0], x["start"] * 2) for i in references for x in i[1]]:
bytecode = f"{bytecode[:loc]}__{n[:36]:_<36}__{bytecode[loc+40:]}"
return bytecode
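# e.g. an unlinked reference to library "SafeMath" replaces the 40-hex-char
# address placeholder with "__" + "SafeMath" left-justified to 36 chars with
# "_" padding + "__" -- 40 characters in total, matching solc's legacy
# "__LibraryName...__" link format.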
def _remove_metadata(bytecode: str) -> str:
if not bytecode:
return ""
idx = -(int(bytecode[-4:], 16) + 2) * 2
return bytecode[:idx]
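# The final two bytes of solc output encode the length of the CBOR-encoded
# metadata that precedes them; e.g. a hex string ending in "0033" carries
# 0x33 = 51 metadata bytes, so (51 + 2) * 2 = 106 hex characters are
# stripped from the end.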
def _generate_coverage_data(
source_map_str: str,
opcodes_str: str,
contract_node: Any,
stmt_nodes: Dict,
branch_nodes: Dict,
has_fallback: bool,
instruction_count: int,
) -> Tuple:
# Generates data used by Brownie for debugging and coverage evaluation
if not opcodes_str:
return {}, {}, {}
source_map = deque(expand_source_map(source_map_str))
opcodes = deque(opcodes_str.split(" "))
contract_nodes = [contract_node] + contract_node.dependencies
source_nodes = {str(i.contract_id): i.parent() for i in contract_nodes}
stmt_nodes = {i: stmt_nodes[i].copy() for i in source_nodes}
statement_map: Dict = {i: {} for i in source_nodes}
# possible branch offsets
branch_original = {i: branch_nodes[i].copy() for i in source_nodes}
branch_nodes = {i: set(i.offset for i in branch_nodes[i]) for i in source_nodes}
# currently active branches, awaiting a jumpi
branch_active: Dict = {i: {} for i in source_nodes}
# branches that have been set
branch_set: Dict = {i: {} for i in source_nodes}
count, pc = 0, 0
pc_list: List = []
revert_map: Dict = {}
fallback_hexstr: str = "unassigned"
    optimizer_revert = get_version() < Version("0.8.0")
active_source_node: Optional[NodeBase] = None
active_fn_node: Optional[NodeBase] = None
active_fn_name: Optional[str] = None
first_source = source_map[0]
while source_map and source_map[-1][2] == -1:
# trim the end of the source map where there are no contracts associated
# this is required because sometimes the source map is too long
# likely a side effect of the YUL optimizer ¯\_(ツ)_/¯
source_map.pop()
while source_map:
# format of source_map is [start, stop, contract_id, jump code]
source = source_map.popleft()
pc_list.append({"op": opcodes.popleft(), "pc": pc})
if (
has_fallback is False
and fallback_hexstr == "unassigned"
and pc_list[-1]["op"] == "REVERT"
and [i["op"] for i in pc_list[-4:-1]] == ["JUMPDEST", "PUSH1", "DUP1"]
):
# flag the REVERT op at the end of the function selector,
# later reverts may jump to it instead of having their own REVERT op
fallback_hexstr = f"0x{hex(pc - 4).upper()[2:]}"
pc_list[-1]["first_revert"] = True
if source[3] != "-":
pc_list[-1]["jump"] = source[3]
pc += 1
if pc_list[-1]["op"].startswith("PUSH") and opcodes[0][:2] == "0x":
pc_list[-1]["value"] = opcodes.popleft()
pc += int(pc_list[-1]["op"][4:])
        # for REVERT opcodes without a source offset, try to infer one
if source[2] == -1 or source == first_source:
if pc_list[-1]["op"] == "REVERT":
_find_revert_offset(
pc_list, source_map, active_source_node, active_fn_node, active_fn_name
)
if source[2] == -1:
continue
# set contract path (-1 means none)
contract_id = str(source[2])
if contract_id not in source_nodes:
            # In Solidity >=0.7.2 the contract ID can reference an AST within the
            # YUL-optimization "generatedSources". Brownie does not support coverage
            # evaluation within these sources, so we consider this to be unmapped.
continue
active_source_node = source_nodes[contract_id]
pc_list[-1]["path"] = contract_id
# set source offset (-1 means none)
if source[0] == -1:
continue
offset = (source[0], source[0] + source[1])
pc_list[-1]["offset"] = offset
if pc_list[-1]["op"] == "REVERT" and not optimizer_revert:
# In Solidity >=0.8.0, an optimization is applied to reverts with an error string
# such that all reverts appear to happen at the same point in the source code.
# We mark this REVERT as the "optimizer revert" so that when it's encountered in
# a trace we know to look back to find the actual revert location.
fn_node = active_source_node.children(
include_parents=False,
include_children=True,
required_offset=offset,
filters=(
{"nodeType": "FunctionCall", "expression.name": "revert"},
{"nodeType": "FunctionCall", "expression.name": "require"},
),
)
if fn_node:
args = len(fn_node[0].arguments)
if args == 2 or (fn_node[0].expression.name == "revert" and args):
optimizer_revert = True
pc_list[-1]["optimizer_revert"] = True
# add error messages for INVALID opcodes
if pc_list[-1]["op"] == "INVALID":
_set_invalid_error_string(active_source_node, pc_list[-1])
# for JUMPI instructions, set active branch markers
if branch_active[contract_id] and pc_list[-1]["op"] == "JUMPI":
for offset in branch_active[contract_id]:
# ( program counter index, JUMPI index)
branch_set[contract_id][offset] = (
branch_active[contract_id][offset],
len(pc_list) - 1,
)
branch_active[contract_id].clear()
# if op relates to previously set branch marker, clear it
elif offset in branch_nodes[contract_id]:
if offset in branch_set[contract_id]:
del branch_set[contract_id][offset]
branch_active[contract_id][offset] = len(pc_list) - 1
try:
# set fn name and statement coverage marker
if "offset" in pc_list[-2] and offset == pc_list[-2]["offset"]:
pc_list[-1]["fn"] = active_fn_name
else:
active_fn_node, active_fn_name = _get_active_fn(active_source_node, offset)
pc_list[-1]["fn"] = active_fn_name
stmt_offset = next(
i for i in stmt_nodes[contract_id] if sources.is_inside_offset(offset, i)
)
stmt_nodes[contract_id].discard(stmt_offset)
statement_map[contract_id].setdefault(active_fn_name, {})[count] = stmt_offset
pc_list[-1]["statement"] = count
count += 1
except (KeyError, IndexError, StopIteration):
pass
if pc_list[-1].get("value", None) == fallback_hexstr and opcodes[0] in ("JUMP", "JUMPI"):
# track all jumps to the initial revert
key = (pc_list[-1]["path"], pc_list[-1]["offset"])
revert_map.setdefault(key, []).append(len(pc_list))
while opcodes[0] not in ("INVALID", "STOP") and pc < instruction_count:
# necessary because sometimes solidity returns an incomplete source map
pc_list.append({"op": opcodes.popleft(), "pc": pc})
pc += 1
if pc_list[-1]["op"].startswith("PUSH") and opcodes[0][:2] == "0x":
pc_list[-1]["value"] = opcodes.popleft()
pc += int(pc_list[-1]["op"][4:])
# compare revert and require statements against the map of revert jumps
for (contract_id, fn_offset), values in revert_map.items():
fn_node = source_nodes[contract_id].children(
depth=2,
include_children=False,
required_offset=fn_offset,
filters={"nodeType": "FunctionDefinition"},
)
if len(fn_node) == 0:
# In Solidity >=0.8.13, with the viaIR option set, there is a dispatch
# function present in the generated bytecode
continue
revert_nodes = fn_node[0].children(
filters=(
{"nodeType": "FunctionCall", "expression.name": "revert"},
{"nodeType": "FunctionCall", "expression.name": "require"},
)
)
for node in revert_nodes:
offset = node.offset
            # if the node offset is not in the source map, apply its offset to the JUMPI op
if not next((x for x in pc_list if "offset" in x and x["offset"] == offset), False):
pc_list[values[0]].update(offset=offset, jump_revert=True)
del values[0]
# set branch index markers and build final branch map
branch_map: Dict = {i: {} for i in source_nodes}
for path, offset, idx in [(k, x, y) for k, v in branch_set.items() for x, y in v.items()]:
# for branch to be hit, need an op relating to the source and the next JUMPI
# this is because of how the compiler optimizes nested BinaryOperations
if "fn" in pc_list[idx[0]]:
fn = pc_list[idx[0]]["fn"]
pc_list[idx[0]]["branch"] = count
pc_list[idx[1]]["branch"] = count
node = next(i for i in branch_original[path] if i.offset == offset)
branch_map[path].setdefault(fn, {})[count] = offset + (node.jump,)
count += 1
pc_map = {i.pop("pc"): i for i in pc_list}
return pc_map, statement_map, branch_map
def _find_revert_offset(
pc_list: List,
source_map: deque,
source_node: NodeBase,
fn_node: NodeBase,
fn_name: Optional[str],
) -> None:
# attempt to infer a source offset for reverts that do not have one
if source_map:
# this revert is not the last instruction
if len(pc_list) >= 8 and pc_list[-8]["op"] == "CALLVALUE":
# a CALLVALUE reference eight instructions back indicates a nonpayable function check
pc_list[-1].update(
dev="Cannot send ether to nonpayable function",
fn=pc_list[-8].get("fn", "<unknown>"),
offset=pc_list[-8].get("offset"),
path=pc_list[-8].get("path"),
)
return
# if there is no active function, we are still inside the function selector table
if not fn_node:
return
# get the offset of the next instruction
next_offset = None
if source_map and source_map[0][2] != -1:
next_offset = (source_map[0][0], source_map[0][0] + source_map[0][1])
# if the next instruction offset is not equal to the offset of the active function,
# but IS contained within the active function, apply this offset to the current
# instruction
if (
next_offset
and next_offset != fn_node.offset
and is_inside_offset(next_offset, fn_node.offset)
):
pc_list[-1].update(path=str(source_node.contract_id), fn=fn_name, offset=next_offset)
return
# if any of the previous conditions are not satisfied, this is the final revert
# statement within a function
if fn_node[-1].nodeType == "ExpressionStatement":
expr = fn_node[-1].expression
if expr.nodeType == "FunctionCall" and expr.get("expression.name") in ("revert", "require"):
pc_list[-1].update(
path=str(source_node.contract_id), fn=fn_name, offset=expr.expression.offset
)
def _set_invalid_error_string(source_node: NodeBase, pc_map: Dict) -> None:
# set custom error string for INVALID opcodes
try:
node = source_node.children(include_children=False, offset_limits=pc_map["offset"])[0]
except IndexError:
return
if node.nodeType == "IndexAccess":
pc_map["dev"] = "Index out of range"
elif node.nodeType == "BinaryOperation":
if node.operator == "/":
pc_map["dev"] = "Division by zero"
elif node.operator == "%":
pc_map["dev"] = "Modulus by zero"
def _get_active_fn(source_node: NodeBase, offset: Tuple[int, int]) -> Tuple[NodeBase, str]:
fn_node = source_node.children(
depth=2, required_offset=offset, filters={"nodeType": "FunctionDefinition"}
)[0]
name = getattr(fn_node, "name", None)
if not name:
if getattr(fn_node, "kind", "function") != "function":
name = f"<{fn_node.kind}>"
elif getattr(fn_node, "isConstructor", False):
name = "<constructor>"
else:
name = "<fallback>"
parent = fn_node.parent()
if parent.nodeType == "SourceUnit":
# the function exists outside a contract
return fn_node, name
return fn_node, f"{fn_node.parent().name}.{name}"
def _get_nodes(output_json: Dict) -> Tuple[Dict, Dict, Dict]:
source_nodes = solcast.from_standard_output(output_json)
stmt_nodes = _get_statement_nodes(source_nodes)
branch_nodes = _get_branch_nodes(source_nodes)
return source_nodes, stmt_nodes, branch_nodes
def _get_statement_nodes(source_nodes: Dict) -> Dict:
# Given a list of source nodes, returns a dict mapping contract id to the set
# of statement node offsets
statements = {}
for node in source_nodes:
statements[str(node.contract_id)] = set(
i.offset
for i in node.children(
include_parents=False,
filters={"baseNodeType": "Statement"},
exclude_filter={"isConstructor": True},
)
)
return statements
def _get_branch_nodes(source_nodes: List) -> Dict:
# Given a list of source nodes, returns a dict mapping contract id to the set
# of nodes corresponding to possible branches in the code
branches: Dict = {}
for node in source_nodes:
branches[str(node.contract_id)] = set()
for contract_node in node.children(depth=1, filters={"nodeType": "ContractDefinition"}):
for child_node in [
x
for i in contract_node
for x in i.children(
filters=(
{"nodeType": "FunctionCall", "expression.name": "require"},
{"nodeType": "IfStatement"},
{"nodeType": "Conditional"},
)
)
]:
branches[str(node.contract_id)] |= _get_recursive_branches(child_node)
return branches
def _get_recursive_branches(base_node: Any) -> Set:
# if node is IfStatement or Conditional, look only at the condition
node = base_node if base_node.nodeType == "FunctionCall" else base_node.condition
# for IfStatement, jumping indicates evaluating false
jump_is_truthful = base_node.nodeType != "IfStatement"
filters = (
{"nodeType": "BinaryOperation", "typeDescriptions.typeString": "bool", "operator": "||"},
{"nodeType": "BinaryOperation", "typeDescriptions.typeString": "bool", "operator": "&&"},
)
all_binaries = node.children(include_parents=True, include_self=True, filters=filters)
# if no BinaryOperation nodes are found, this node is the branch
if not all_binaries:
# if node is FunctionCall, look at the first argument
if base_node.nodeType == "FunctionCall":
node = node.arguments[0]
# some versions of solc do not map IfStatement unary operations to bytecode
elif node.nodeType == "UnaryOperation":
node = node.subExpression
node.jump = jump_is_truthful
return set([node])
# look at children of BinaryOperation nodes to find all possible branches
binary_branches = set()
for node in (x for i in all_binaries for x in (i.leftExpression, i.rightExpression)):
if node.children(include_self=True, filters=filters):
continue
_jump = jump_is_truthful
if not _is_rightmost_operation(node, base_node.depth):
_jump = _check_left_operator(node, base_node.depth)
if node.nodeType == "UnaryOperation":
node = node.subExpression
node.jump = _jump
binary_branches.add(node)
return binary_branches
def _is_rightmost_operation(node: NodeBase, depth: int) -> bool:
# Check if the node is the final operation within the expression
parents = node.parents(
depth, {"nodeType": "BinaryOperation", "typeDescriptions.typeString": "bool"}
)
return not next(
(i for i in parents if i.leftExpression == node or node.is_child_of(i.leftExpression)),
False,
)
def _check_left_operator(node: NodeBase, depth: int) -> bool:
# Find the nearest parent boolean where this node sits on the left side of
# the comparison, and return True if that node's operator is ||
parents = node.parents(
depth, {"nodeType": "BinaryOperation", "typeDescriptions.typeString": "bool"}
)
op = next(
i for i in parents if i.leftExpression == node or node.is_child_of(i.leftExpression)
).operator
return op == "||"
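# A worked illustration of the jump semantics computed above (hypothetical
# Solidity expressions, shown only as a sketch):
#   require(a || b) -> the JUMPI for `a` fires when `a` is truthy
#                      (short-circuit), so _check_left_operator returns True
#   require(a && b) -> the JUMPI for `a` fires when `a` is falsy,
#                      so _check_left_operator returns False
#   if (cond) {...} -> jumping skips the body, i.e. `cond` evaluated false,
#                      hence jump_is_truthful is False for IfStatement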
|
316c0fecafa033b8585f1103944a6a9460623e63
|
f1973e136f49f0b5ea2ec63c4d862188d197e5a5
|
/vms/migrations/0005_subnet_vxlan_id.py
|
e8e96b995795fcfed2556c740e304e6f56cfe344
|
[
"Apache-2.0"
] |
permissive
|
erigones/esdc-ce
|
65dc7d84e1bca3e3fcec668f54acae20183096a2
|
7e3dedddbe821283d909393f333eed4acd452953
|
refs/heads/master
| 2023-02-07T17:57:15.970089
| 2022-02-03T12:55:14
| 2022-02-03T12:55:14
| 73,122,985
| 123
| 36
|
Apache-2.0
| 2023-01-24T23:22:54
| 2016-11-07T21:34:53
|
Python
|
UTF-8
|
Python
| false
| false
| 468
|
py
|
0005_subnet_vxlan_id.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vms', '0004_vm_node_add_note_field'),
]
operations = [
migrations.AddField(
model_name='subnet',
name='vxlan_id',
field=models.PositiveIntegerField(default=None, null=True, verbose_name='VXLAN segment ID', blank=True),
),
]
|
9f3479e2ffe585c9f9317babfc9abdf01ed18d91
|
fcab2679a5c5030c54bbee10857a64263e8d7b46
|
/tests/lib/nftables_test.py
|
ba0986114b2caddcdfd4c0d6cc123fd657d7a786
|
[
"Apache-2.0"
] |
permissive
|
google/capirca
|
38eb9339e565b30667d264e18e9b81ee0ac41d18
|
d145ca447e0e04895507777b8c5834c22e90df11
|
refs/heads/master
| 2023-08-28T02:37:19.814474
| 2023-08-23T13:59:33
| 2023-08-23T13:59:33
| 40,198,544
| 743
| 238
|
Apache-2.0
| 2023-09-11T20:02:40
| 2015-08-04T17:25:11
|
Python
|
UTF-8
|
Python
| false
| false
| 25,829
|
py
|
nftables_test.py
|
# Copyright 2023 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for Nftables rendering module."""
import datetime
import re
from unittest import mock
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
from capirca.lib import aclgenerator
from capirca.lib import nacaddr
from capirca.lib import naming
from capirca.lib import nftables
from capirca.lib import policy
class DictObj:
"""Helper class to use a dictionary of dictionaries to form an object.
We can then specifically test using it.
"""
def __init__(self, in_dict: dict):
assert isinstance(in_dict, dict)
for key, val in in_dict.items():
if isinstance(val, (list, tuple)):
setattr(
self, key, [DictObj(x) if isinstance(x, dict) else x for x in val]
)
else:
setattr(self, key, DictObj(val) if isinstance(val, dict) else val)
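# A minimal DictObj usage sketch (hypothetical data, not used by the tests):
#   obj = DictObj({'meta': {'name': 'term-1'}, 'ports': [{'num': 80}]})
#   obj.meta.name    -> 'term-1'
#   obj.ports[0].num -> 80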
# "logging" is not a token.
SUPPORTED_TOKENS = frozenset({
'action',
'comment',
'destination_address',
'destination_address_exclude',
'destination_port',
'expiration',
'icmp_type',
'name', # obj attribute, not token
'option',
'protocol',
'platform',
'platform_exclude',
'source_interface', # input interface
'source_address',
'source_address_exclude',
'source_port',
'destination_interface', # output interface
'translated', # obj attribute, not token
'stateless_reply',
})
SUPPORTED_SUB_TOKENS = {
'action': {'accept', 'deny'},
'option': {'established', 'tcp-established'},
'icmp_type': {
'alternate-address',
'certification-path-advertisement',
'certification-path-solicitation',
'conversion-error',
'destination-unreachable',
'echo-reply',
'echo-request',
'mobile-redirect',
'home-agent-address-discovery-reply',
'home-agent-address-discovery-request',
'icmp-node-information-query',
'icmp-node-information-response',
'information-request',
'inverse-neighbor-discovery-advertisement',
'inverse-neighbor-discovery-solicitation',
'mask-reply',
'mask-request',
'information-reply',
'mobile-prefix-advertisement',
'mobile-prefix-solicitation',
'multicast-listener-done',
'multicast-listener-query',
'multicast-listener-report',
'multicast-router-advertisement',
'multicast-router-solicitation',
'multicast-router-termination',
'neighbor-advertisement',
'neighbor-solicit',
'packet-too-big',
'parameter-problem',
'redirect',
'redirect-message',
'router-advertisement',
'router-renumbering',
'router-solicit',
'router-solicitation',
'source-quench',
'time-exceeded',
'timestamp-reply',
'timestamp-request',
'unreachable',
'version-2-multicast-listener-report',
},
}
# IP address data, to be loaded onto policy and test rendering.
TEST_IPV4_ONLY = [nacaddr.IP('10.2.3.4/32')]
TEST_IPV6_ONLY = [nacaddr.IP('2001:4860:8000::5/128')]
TEST_IPS = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('2001:4860:8000::5/128')]
HEADER_TEMPLATE = """
header {
target:: nftables %s
}
"""
HEAD_OVERRIDE_DEFAULT_ACTION = """
header {
target:: nftables inet output ACCEPT
}
"""
HEADER_COMMENT = """
header {
comment:: "Noverbose + custom priority policy example"
target:: nftables inet output ACCEPT
}
"""
HEADER_MIXED_AF = """
header {
target:: nftables mixed output
}
"""
HEADER_IPV4_AF = """
header {
target:: nftables inet output
}
"""
HEADER_IPV6_AF = """
header {
target:: nftables inet6 output
}
"""
HEADER_NOVERBOSE = """
header {
target:: nftables mixed output noverbose
}
"""
GOOD_HEADER_1 = """
header {
target:: nftables inet6 INPUT
}
"""
GOOD_HEADER_2 = """
header {
target:: nftables mixed output accept
}
"""
GOOD_HEADER_3 = """
header {
target:: nftables inet input
}
"""
DENY_TERM = """
term deny-term {
comment:: "Dual-stack IPv4/v6 deny all"
action:: deny
}
"""
# Input interface name test term.
SOURCE_INTERFACE_TERM = """
term src-interface-term {
source-interface:: eth123
protocol:: tcp
action:: accept
}
"""
# Output interface name test term.
DESTINATION_INTERFACE_TERM = """
term dst-interface-term {
destination-interface:: eth123
protocol:: tcp
action:: accept
}
"""
BAD_INTERFACE_TERM = """
term dst-interface-term {
source-interface:: eth123
destination-interface:: eth123
protocol:: tcp
action:: accept
}
"""
ESTABLISHED_OPTION_TERM = """
term established-term {
protocol:: udp
option:: established
action:: accept
}
"""
TCP_ESTABLISHED_OPTION_TERM = """
term tcp-established-term {
protocol:: tcp
option:: tcp-established
action:: accept
}
"""
ICMP_TERM = """
term good-icmp {
protocol:: icmp
action:: accept
}
"""
ICMP_SINGLE_TYPE = """
term good-icmp-single-type {
comment:: "IPv4 ICMP accept single type"
icmp-type:: router-solicit
protocol:: icmp
action:: accept
}
"""
ICMPV6_TERM = """
term good-icmpv6 {
protocol:: icmpv6
action:: accept
}
"""
ICMPV6_SINGLE_TYPE = """
term good-icmpv6-single-type {
comment:: "IPv6 ICMP accept single type"
icmp-type:: router-solicit
protocol:: icmpv6
action:: accept
}
"""
ICMPV6_MULTI_TERM = """
term good-icmpv6-type {
comment:: "IPv6 ICMP accept many types"
icmp-type:: router-solicit router-advertisement neighbor-advertisement neighbor-solicit
protocol:: icmpv6
action:: accept
}
"""
COMMENT_TERM = """
term good-icmpv6-type {
comment:: "This term has a comment"
protocol:: tcp
action:: accept
}
"""
NOCOMMENT_TERM = """
term good-icmpv6-type {
protocol:: tcp
action:: accept
}
"""
LOGGING_TERM = """
term log-packets {
logging:: true
action:: accept
}
"""
COUNTER_TERM = """
term count-packets {
counter:: thisnameisignored
action:: accept
}
"""
COUNT_AND_LOG_TERM = """
term count-and-log-packets {
logging:: true
counter:: thisnameisignored
action:: accept
}
"""
GOOD_TERM_1 = """
term good-term-1 {
action:: accept
}
"""
GOOD_TERM_2 = """
term good-term-2 {
protocol:: tcp
action:: accept
destination-port:: SSH
destination-address:: TEST_NET
}
"""
IPV6_ONLY_TERM = """
term ip6-only {
destination-address:: TEST_IPV6_ONLY
action:: accept
}
"""
IPV6_SRCIP = """
term ip6-src-addr {
source-address:: TEST_IPV6_ONLY
action:: deny
}
"""
IPV4_SRCIP = """
term ip4-src-addr {
source-address:: TEST_IPV4_ONLY
action:: deny
}
"""
ALL_SRCIP = """
term all-src-addr {
comment:: "All IP address families. v4/v6"
source-address:: TEST_IPS
action:: deny
}
"""
EXCLUDE = {'ip6': [nacaddr.IP('::/3'), nacaddr.IP('::/0')]}
# Print an info message when a term is set to expire within that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
def IPhelper(addresses):
"""Helper for string to nacaddr.IP conversion for parametized tests."""
normalized = []
if not addresses:
# if empty list of addresses.
return addresses
else:
for addr in addresses:
normalized.append(nacaddr.IP(addr))
return normalized
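# For example, IPhelper(['1.1.1.1', '8.8.8.8']) returns
# [nacaddr.IP('1.1.1.1'), nacaddr.IP('8.8.8.8')], the object format the
# Term helpers under test expect.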
class NftablesTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.naming = mock.create_autospec(naming.Naming)
self.dummyterm = nftables.Term('', '', '')
@parameterized.parameters(('ip protocol tcp', ' ip protocol tcp'), ('', ''))
def testAdd(self, statement, expected_output):
result = nftables.Add(statement)
self.assertEqual(result, expected_output)
@parameterized.parameters((2, 'chain acl_name', ' chain acl_name'))
def testTabSpacer(self, num_spaces, statement, expected_output):
result = nftables.TabSpacer(num_spaces, statement)
self.assertEqual(result, expected_output)
@parameterized.parameters(
(
'ip',
['200.1.1.3/32', '9782:b30a:e5c6:1aa4:29ff:e57c:44a0:1b84'],
['200.1.1.3/32', '2606:4700:4700::1111'],
['ip saddr 200.1.1.3/32 ip daddr 200.1.1.3/32'],
),
(
'ip',
['200.1.1.3/32', '200.1.1.4/32'],
['200.1.1.3/32', '200.1.1.4/32'],
[
'ip saddr { 200.1.1.3/32, 200.1.1.4/32 } ip daddr { 200.1.1.3/32,'
' 200.1.1.4/32 }'
],
),
(
'ip6',
['8.8.8.8', '9782:b30a:e5c6:1aa4:29ff:e57c:44a0:1b84'],
['200.1.1.3/32', '2606:4700:4700::1111'],
[
'ip6 saddr 9782:b30a:e5c6:1aa4:29ff:e57c:44a0:1b84/128 ip6 daddr'
' 2606:4700:4700::1111/128'
],
),
(
'ip6',
['2606:4700:4700::1111', '2606:4700:4700::1112'],
['2606:4700:4700::1111', '2606:4700:4700::1112'],
[
'ip6 saddr { 2606:4700:4700::1111/128, 2606:4700:4700::1112/128 }'
' ip6 daddr { 2606:4700:4700::1111/128,'
' 2606:4700:4700::1112/128 }'
],
),
)
def test_AddrStatement(self, af, src_addr, dst_addr, expected):
# Necessary object format.
src_obj = IPhelper(src_addr)
dst_obj = IPhelper(dst_addr)
result = self.dummyterm._AddrStatement(af, src_obj, dst_obj)
self.assertEqual(result, expected)
@parameterized.parameters(
(
['nd-router-advert', 'nd-neighbor-solicit', 'nd-neighbor-advert'],
'{ nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert }',
),
(['200.1.1.3/32'], '200.1.1.3/32'),
(['1.1.1.1', '8.8.8.8'], '{ 1.1.1.1, 8.8.8.8 }'),
(['tcp', 'udp', 'icmp'], '{ tcp, udp, icmp }'),
(['80', '443'], '{ 80, 443 }'),
('53', '53'),
)
def testCreateAnonymousSet(self, input_data, expected):
result = self.dummyterm.CreateAnonymousSet(input_data)
self.assertEqual(result, expected)
@parameterized.parameters(
(
'',
[
'ip6 saddr 2606:4700:4700::1111/128 ip6 daddr {'
' 2001:4860:4860::8844/128, 2001:4860:4860::8888/128 }'
],
['tcp sport 80 tcp dport 80'],
(
'ct state { ESTABLISHED, RELATED } log prefix'
' "combo_cnt_log_established" counter'
),
'accept',
'',
[
'ip6 saddr 2606:4700:4700::1111/128 ip6 daddr {'
' 2001:4860:4860::8844/128, 2001:4860:4860::8888/128 } tcp sport'
' 80 tcp dport 80 ct state { ESTABLISHED, RELATED } log prefix'
' "combo_cnt_log_established" counter accept'
],
),
(
'',
['ip daddr 8.8.8.8/32'],
['tcp sport 53 tcp dport 53'],
'ct state new',
'accept',
'comment "this is a term with a comment"',
[
'ip daddr 8.8.8.8/32 tcp sport 53 tcp dport 53 ct state new'
' accept comment "this is a term with a comment"'
],
),
)
def testGroupExpressions(
self,
int_str,
address_expr,
ports_proto_expr,
opt,
verdict,
comment,
expected_output,
):
result = self.dummyterm.GroupExpressions(
int_str, address_expr, ports_proto_expr, opt, verdict, comment
)
self.assertEqual(result, expected_output)
def testBadInterfaceTerm(self):
pol = policy.ParsePolicy(
GOOD_HEADER_1 + GOOD_TERM_1 + BAD_INTERFACE_TERM, self.naming
)
with self.assertRaises(nftables.TermError):
nftables.Nftables.__init__(
nftables.Nftables.__new__(nftables.Nftables), pol, EXP_INFO
)
def testDuplicateTerm(self):
pol = policy.ParsePolicy(
GOOD_HEADER_1 + GOOD_TERM_1 + GOOD_TERM_1, self.naming
)
with self.assertRaises(nftables.TermError):
nftables.Nftables.__init__(
nftables.Nftables.__new__(nftables.Nftables), pol, EXP_INFO
)
@parameterized.parameters(
([(80, 80)], '80'), ([(1024, 65535)], '1024-65535'), ([], '')
)
def testGroup(self, data, expected_output):
"""Test _Group function we use in Ports."""
result = self.dummyterm._Group(data)
self.assertEqual(result, expected_output)
@parameterized.parameters(
('ip', ['tcp'], [], [], [], ['ip protocol tcp']),
(
'ip',
['tcp'],
[(3198, 3199)],
[(80, 80), (443, 443)],
[],
['tcp sport 3198-3199 tcp dport { 80, 443 }'],
),
(
'ip',
['tcp'],
[],
[(80, 80), (443, 443)],
[],
['tcp dport { 80, 443 }'],
),
('ip', ['tcp, udp'], [], [], [], ['ip protocol tcp, udp']),
('ip6', ['tcp'], [], [], [], ['meta l4proto tcp']),
(
'ip6',
['tcp'],
[(3198, 3199)],
[(80, 80), (443, 443)],
[],
['tcp sport 3198-3199 tcp dport { 80, 443 }'],
),
('ip6', ['tcp', 'udp'], [], [], [], ['meta l4proto { tcp, udp }']),
)
def testPortsAndProtocols(self, af, proto, src_p, dst_p, icmp_type, expected):
result = self.dummyterm.PortsAndProtocols(
af, proto, src_p, dst_p, icmp_type
)
self.assertEqual(result, expected)
@parameterized.parameters(
'chain_name input 0 inet extraneous_target_option',
'ip6 OUTPUT 300 400 mixed input', # pylint: disable=implicit-str-concat
'ip forwarding',
'ip7 0 spaghetti',
'ip6 prerouting',
'chain_name',
'',
)
def testBadHeader(self, case):
logging.info('Testing bad header case %s.', case)
header = HEADER_TEMPLATE % case
pol = policy.ParsePolicy(header + GOOD_TERM_1, self.naming)
with self.assertRaises(nftables.HeaderError):
nftables.Nftables.__init__(
nftables.Nftables.__new__(nftables.Nftables), pol, EXP_INFO
)
@parameterized.parameters((HEADER_NOVERBOSE, False), (HEADER_COMMENT, True))
def testVerboseHeader(self, header_to_use, expected_output):
pol = policy.ParsePolicy(header_to_use + GOOD_TERM_1, self.naming)
data = nftables.Nftables(pol, EXP_INFO)
for _, _, _, _, _, _, verbose, _ in data.nftables_policies:
result = verbose
self.assertEqual(result, expected_output)
def testGoodHeader(self):
nftables.Nftables(
policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO
)
nft = str(
nftables.Nftables(
policy.ParsePolicy(
GOOD_HEADER_1 + GOOD_TERM_1 + GOOD_HEADER_2 + IPV6_SRCIP,
self.naming,
),
EXP_INFO,
)
)
self.assertIn('type filter hook input', nft)
def testStatefulFirewall(self):
nftables.Nftables(
policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO
)
nft = str(
nftables.Nftables(
policy.ParsePolicy(
GOOD_HEADER_1 + GOOD_TERM_1 + GOOD_HEADER_2 + IPV6_SRCIP,
self.naming,
),
EXP_INFO,
)
)
self.assertIn('ct state established,related accept', nft)
def testICMPv6type(self):
nftables.Nftables(
policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO
)
nft = str(
nftables.Nftables(
policy.ParsePolicy(GOOD_HEADER_1 + ICMPV6_MULTI_TERM, self.naming),
EXP_INFO,
)
)
self.assertIn(
(
'icmpv6 type { nd-router-solicit, nd-router-advert,'
' nd-neighbor-advert, nd-neighbor-solicit } accept'
),
nft,
)
def testOverridePolicyHeader(self):
expected_output = 'accept'
pol = policy.ParsePolicy(
HEAD_OVERRIDE_DEFAULT_ACTION + GOOD_TERM_1, self.naming
)
data = nftables.Nftables(pol, EXP_INFO)
for _, _, _, _, _, default_policy, _, _ in data.nftables_policies:
result = default_policy
self.assertEqual(result, expected_output)
@parameterized.parameters(
(['127.0.0.1', '8.8.8.8'], {'ip': ['127.0.0.1/32', '8.8.8.8/32']}),
(
['0.0.0.0/8', '2001:db8::/32'],
{'ip': ['0.0.0.0/8'], 'ip6': ['2001:db8::/32']},
),
)
def testAddressClassifier(self, addr_to_classify, expected_output):
result = nftables.Term._AddressClassifier(self, IPhelper(addr_to_classify))
self.assertEqual(result, expected_output)
@parameterized.parameters(
('ip6', ['multicast-listener-query'], ['mld-listener-query']),
(
'ip6',
['echo-request', 'multicast-listener-query'],
['echo-request', 'mld-listener-query'],
),
(
'ip6',
['router-solicit', 'multicast-listener-done', 'router-advertisement'],
['nd-router-solicit', 'mld-listener-done', 'nd-router-advert'],
),
('ip4', ['echo-request', 'echo-reply'], ['echo-request', 'echo-reply']),
)
def testMapICMPtypes(self, af, icmp_types, expected_output):
result = self.dummyterm.MapICMPtypes(af, icmp_types)
self.assertEqual(result, expected_output)
@parameterized.parameters(
(
{
'name': 'tcp_established',
'option': ['tcp-established', 'established'],
'icmp_type': None,
'counter': None,
'logging': [],
'protocol': ['tcp', 'icmp'],
'action': ['deny'],
},
'',
),
(
{
'name': 'icmpv6_noconttrack',
'option': [],
'icmp_type': ['router-solicit'],
'counter': None,
'logging': [],
'protocol': ['icmpv6'],
'action': ['accept'],
},
'',
),
(
{
'name': 'dont_render_tcp_established',
'option': ['tcp-established', 'established'],
'icmp_type': None,
'counter': None,
'logging': [],
'protocol': ['icmp'],
'action': ['accept'],
},
'ct state new',
),
(
{
'name': 'blank_option_donothing',
'option': [],
'icmp_type': None,
'counter': None,
'logging': [],
'protocol': ['icmp'],
'action': ['accept'],
},
'ct state new',
),
(
{
'name': 'syslog',
'option': [],
'icmp_type': None,
'counter': None,
'logging': ['syslog'],
'protocol': ['tcp'],
'action': ['accept'],
},
'ct state new log prefix "syslog"',
),
(
{
'name': 'logging_disabled',
'option': [],
'icmp_type': None,
'counter': None,
'logging': ['disable'],
'protocol': ['tcp'],
'action': ['accept'],
},
'ct state new',
),
(
{
'name': 'combo_logging_tcp_established',
'option': ['tcp-established'],
'icmp_type': None,
'counter': None,
'logging': ['true'],
'protocol': ['tcp'],
'action': ['accept'],
},
'ct state new log prefix "combo_logging_tcp_established"',
),
(
{
'name': 'combo_cnt_log_established',
'option': ['tcp-established'],
'icmp_type': None,
'counter': 'whatever-name-you-want',
'logging': ['true'],
'protocol': ['tcp'],
'action': ['deny'],
},
'log prefix "combo_cnt_log_established" counter',
),
)
def testOptionsHandler(self, term_dict, expected_output):
term = DictObj(term_dict)
result = self.dummyterm._OptionsHandler(term)
self.assertEqual(result, expected_output)
def testBuildTokens(self):
self.naming.GetServiceByProto.side_effect = [['25'], ['26']]
pol1 = nftables.Nftables(
policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO
)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
@parameterized.parameters(
(
ESTABLISHED_OPTION_TERM,
(
'WARNING: Term established-term is a established term and will'
' not be rendered.'
),
),
(
TCP_ESTABLISHED_OPTION_TERM,
(
'WARNING: Term tcp-established-term is a tcp-established term and'
' will not be rendered.'
),
),
)
def testSkippedTerm(self, termdata, messagetxt):
with self.assertLogs() as ctx:
# run a policy object expected to be skipped and logged.
nft = nftables.Nftables(
policy.ParsePolicy(GOOD_HEADER_1 + termdata, self.naming), EXP_INFO
)
# self.assertEqual(len(ctx.records), 2)
record = ctx.records[1]
self.assertEqual(record.message, messagetxt)
@parameterized.parameters(
(HEADER_MIXED_AF + ICMPV6_TERM, 'ip protocol icmp'),
(HEADER_IPV4_AF + ICMPV6_TERM, 'meta l4proto icmpv6'),
(HEADER_IPV6_AF + ICMP_TERM, 'ip protocol icmp'),
)
def testRulesetGeneratorICMPmismatch(self, pol_data, doesnotcontain):
# This test ensures that ICMP terms are not rendered for a mismatched address family.
nftables.Nftables(policy.ParsePolicy(pol_data, self.naming), EXP_INFO)
nft = str(
nftables.Nftables(policy.ParsePolicy(pol_data, self.naming), EXP_INFO)
)
self.assertNotIn(doesnotcontain, nft)
def testRulesetGeneratorUniqueChain(self):
# This test is intended to verify that on mixed address family rulesets
# no duplicate instance of a simple deny is rendered within a mixed chain.
expected_term_rule = 'drop comment "Dual-stack IPv4/v6 deny all"'
count = 0
nftables.Nftables(
policy.ParsePolicy(HEADER_MIXED_AF + DENY_TERM, self.naming), EXP_INFO
)
nft = str(
nftables.Nftables(
policy.ParsePolicy(HEADER_MIXED_AF + DENY_TERM, self.naming),
EXP_INFO,
)
)
matching_lines = re.findall(expected_term_rule, nft)
for match in matching_lines:
count += 1
self.assertEqual(count, 1)
@parameterized.parameters(
(GOOD_HEADER_1 + GOOD_TERM_2, 'inet6'),
(GOOD_HEADER_1 + ICMPV6_TERM, 'inet6'),
(GOOD_HEADER_1 + COMMENT_TERM, 'mixed'),
(GOOD_HEADER_2 + GOOD_TERM_2, 'mixed'),
(GOOD_HEADER_3 + GOOD_TERM_2, 'inet'),
(GOOD_HEADER_3 + ICMP_TERM, 'inet'),
)
def testRulesetGeneratorAF(self, policy_data: str, expected_inet: str):
self.naming.GetNetAddr.return_value = TEST_IPS
self.naming.GetServiceByProto.return_value = ['22']
nft = nftables.Nftables(
policy.ParsePolicy(policy_data, self.naming), EXP_INFO
)
for header, terms in nft.policy.filters:
filter_options = header.FilterOptions('nftables')
nf_af, nf_hook, _, _, verbose = nft._ProcessHeader(filter_options)
for term in terms:
term_object = nftables.Term(term, nf_af, nf_hook, verbose)
# Checks for address family consistency within terms
ruleset_list = term_object.RulesetGenerator(term)
self.assertNotEmpty(ruleset_list)
for ruleset in ruleset_list:
if expected_inet == 'inet':
self.assertNotIn(str(TEST_IPV6_ONLY), ruleset)
elif expected_inet == 'inet6':
self.assertNotIn(str(TEST_IPV4_ONLY), ruleset)
for rule in ruleset.split('\n'):
if rule.startswith('ip '):
self.assertNotIn('meta l4proto', rule)
self.assertNotIn('icmpv6', rule)
if rule.startswith('ip6 '):
self.assertNotIn('ip protocol', rule)
self.assertNotIn('icmp', rule)
@parameterized.parameters(
(
GOOD_HEADER_1 + SOURCE_INTERFACE_TERM,
TEST_IPS,
' iifname eth123 meta l4proto',
),
(
GOOD_HEADER_1 + DESTINATION_INTERFACE_TERM,
TEST_IPS,
' oifname eth123 meta l4proto',
),
(GOOD_HEADER_1 + LOGGING_TERM, TEST_IPS, 'log prefix "log-packets"'),
(GOOD_HEADER_1 + COUNTER_TERM, TEST_IPS, 'counter'),
(
GOOD_HEADER_1 + COUNT_AND_LOG_TERM,
TEST_IPS,
'log prefix "count-and-log-packets" counter',
),
(
HEADER_MIXED_AF + IPV6_ONLY_TERM,
TEST_IPS,
'ip6 daddr 2001:4860:8000::5/128 ct state new accept',
),
(
HEADER_MIXED_AF + ALL_SRCIP,
TEST_IPS,
'ip saddr 10.2.3.4/32 drop comment "All IP address families. v4/v6"',
),
(GOOD_HEADER_3 + ICMP_SINGLE_TYPE, TEST_IPS, 'icmp type router-solicit'),
(
GOOD_HEADER_1 + ICMPV6_SINGLE_TYPE,
TEST_IPS,
'icmpv6 type nd-router-solicit',
),
)
def testRulesetGenerator(self, policy_data: str, IPs, contains: str):
self.naming.GetNetAddr.return_value = IPs
nft = str(
nftables.Nftables(
policy.ParsePolicy(policy_data, self.naming), EXP_INFO
)
)
self.assertIn(contains, nft)
if __name__ == '__main__':
absltest.main()
|
06236c0df47767d47440065274f28ab950af74b2
|
0032d988541e85c47b5034c20ecf88220dde5a95
|
/openbook_auth/migrations/0041_usernotificationssettings_post_comment_reaction_notifications.py
|
1347dd1f5cf7c08abdcee2a1c21b7d2b47bcfa3e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
OkunaOrg/okuna-api
|
eabd37fef9d2be59b590ed8d72bee084ac377997
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
refs/heads/master
| 2022-02-04T21:31:10.577601
| 2021-12-28T18:20:39
| 2021-12-28T18:20:39
| 151,052,951
| 185
| 92
|
MIT
| 2022-01-13T01:00:40
| 2018-10-01T07:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
0041_usernotificationssettings_post_comment_reaction_notifications.py
|
# Generated by Django 2.2.2 on 2019-06-21 08:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openbook_auth', '0040_auto_20190606_0944'),
]
operations = [
migrations.AddField(
model_name='usernotificationssettings',
name='post_comment_reaction_notifications',
field=models.BooleanField(default=True, verbose_name='post comment reaction notifications'),
),
]
|
dc2f83c30c7c85047d72aeae41e6c3e4bfde18fe
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/compgen/nqg/tasks/geoquery/funql_normalization_test.py
|
4fc9ac64464f8272c932fc2b264b7d556632ccdf
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
funql_normalization_test.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for funql_reformatter."""
from language.compgen.nqg.tasks.geoquery import funql_normalization
import tensorflow as tf
class FunqlReformatterTest(tf.test.TestCase):
def test_funql_reformatter_1(self):
# which states have points higher than the highest point in colorado
original = "answer(state(loc_1(place(higher_2(highest(place(loc_2(stateid(colorado)))))))))"
normalized = funql_normalization.normalize_funql(original)
restored = funql_normalization.restore_funql(normalized)
expected_normalized = "answer(intersection(state,loc_1(intersection(place,higher_2(highest(intersection(place,loc_2(stateid(colorado)))))))))"
self.assertEqual(normalized, expected_normalized)
self.assertEqual(restored, original)
def test_funql_reformatter_2(self):
# what states border the states with the most cities
original = "answer(state(next_to_2(most(state(loc_1(city(all)))))))"
normalized = funql_normalization.normalize_funql(original)
restored = funql_normalization.restore_funql(normalized)
expected_normalized = "answer(intersection(state,next_to_2(most(state,loc_1,city))))"
self.assertEqual(normalized, expected_normalized)
self.assertEqual(restored, original)
if __name__ == "__main__":
tf.test.main()
|
aaa027a671af25cfdc00637b17087982f9be889d
|
52a32a93942b7923b7c0c6ca5a4d5930bbba384b
|
/dojo/tools/cobalt/parser.py
|
172982dd67d3bd3738898954110689f95e0bf8cf
|
[
"MIT-open-group",
"GCC-exception-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"GPL-3.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-3.0-or-later",
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"PSF-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-2.0-or-later",
"HPND",
"libtiff",
"LGPL-2.1-or-later",
"EPL-2.0",
"GPL-3.0-only",
"MIT",
"BSD-3-Clause-Modification",
"LicenseRef-scancode-public-domain-disclaimer",
"HPND-Markus-Kuhn",
"CC-BY-SA-4.0",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LicenseRef-scancode-openssl-exception-lgpl3.0plus",
"Libpng",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSD-Advertising-Acknowledgement",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT-Modern-Variant",
"ISC",
"GPL-2.0-only",
"LicenseRef-scancode-xfree86-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause"
] |
permissive
|
DefectDojo/django-DefectDojo
|
43bfb1c728451335661dadc741be732a50cd2a12
|
b98093dcb966ffe972f8719337de2209bf3989ec
|
refs/heads/master
| 2023-08-21T13:42:07.238370
| 2023-08-14T18:00:34
| 2023-08-14T18:00:34
| 31,028,375
| 2,719
| 1,666
|
BSD-3-Clause
| 2023-09-14T19:46:49
| 2015-02-19T17:53:47
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
parser.py
|
import csv
import hashlib
import io
from dojo.models import Finding
__author__ = "dr3dd589"
class CobaltParser(object):
def get_scan_types(self):
return ["Cobalt.io Scan"]
def get_label_for_scan_types(self, scan_type):
return scan_type
def get_description_for_scan_types(self, scan_type):
return "CSV Report"
def get_findings(self, filename, test):
if filename is None:
return list()
content = filename.read()
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
io.StringIO(content), delimiter=",", quotechar='"'
)
csvarray = []
dupes = dict()
# FIXME: double loop; could hurt performance if the number of issues is large
for row in reader:
csvarray.append(row)
for row in csvarray:
finding = Finding(test=test)
finding.title = (
row["Title"] if row["Title"][0] != "'" else row["Title"][1:]
)
Type = row["Type"] if row["Type"][0] != "'" else row["Type"][1:]
Description = (
row["Description"]
if row["Description"][0] != "'"
else row["Description"][1:]
)
finding.description = (
"**Type** : "
+ Type
+ "\n\n"
+ "**Description** : "
+ Description
+ "\n"
)
finding.mitigation = (
row["SuggestedFix"]
if row["SuggestedFix"][0] != "'"
else row["SuggestedFix"][1:]
)
finding.references = (
row["ResearcherUrl"]
if row["ResearcherUrl"][0] != "'"
else row["ResearcherUrl"][1:]
)
finding.steps_to_reproduce = (
row["StepsToReproduce"]
if row["StepsToReproduce"][0] != "'"
else row["StepsToReproduce"][1:]
)
finding.severity_justification = (
row["CriticalityJustification"]
if row["CriticalityJustification"][0] != "'"
else row["CriticalityJustification"][1:]
)
finding.severity = "Info"
if finding is not None:
if finding.title is None:
finding.title = ""
if finding.description is None:
finding.description = ""
key = hashlib.md5(
(finding.title + "|" + finding.description).encode("utf-8")
).hexdigest()
if key not in dupes:
dupes[key] = finding
return list(dupes.values())
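# Deduplication sketch (hypothetical values): two rows with the same Title
# and Description produce the same key,
#   hashlib.md5(("XSS" + "|" + "Reflected XSS").encode("utf-8")).hexdigest()
# so only the first Finding with that digest is returned.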
|
cb07d1870ef3bccdc1b7a66c87a73d2486145087
|
57592d24e21aa39ff93a246784fea957f74b4c84
|
/Pennylane Demos/1 - Basics/1 - Getting Started/3. Hybrid.py
|
d45f7c8d0829dcaf0beb14b05960cdf1e9ac4c84
|
[] |
no_license
|
theerfan/Q
|
e334df61e862206adf24440297c735750f314311
|
a9855e8b5ac5193832bf822ecc866bc8650ea461
|
refs/heads/master
| 2022-10-14T19:53:28.246593
| 2022-09-13T06:57:02
| 2022-09-13T06:57:02
| 220,534,168
| 113
| 57
| null | 2022-09-13T06:57:03
| 2019-11-08T19:27:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
3. Hybrid.py
|
import pennylane as qml
from pennylane import numpy as np
## Plugin
dev_fock = qml.device("strawberryfields.fock", wires=2, cutoff_dim=2)
@qml.qnode(dev_fock, diff_method="parameter-shift")
def photon_redirection(params):
qml.FockState(1, wires=0)
qml.Beamsplitter(params[0], params[1], wires=[0, 1])
return qml.expval(qml.NumberOperator(1))
def cost(params):
return -photon_redirection(params)
init_params = np.array([0.01, 0.01])
print(cost(init_params))
# Starting from 0 gives a zero gradient, so the optimizer would stay trapped at that stationary point
dphoton_redirection = qml.grad(photon_redirection, argnum=0)
print(dphoton_redirection([0.0, 0.0]))
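# By contrast, at the small non-zero starting point the parameter-shift
# gradient is finite, so gradient descent can make progress (a quick sanity
# check; the exact values depend on the simulator):
print(dphoton_redirection(init_params))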
# initialise the optimizer
opt = qml.GradientDescentOptimizer(stepsize=0.4)
# set the number of steps
steps = 100
# set the initial parameter values
params = init_params
for i in range(steps):
# update the circuit parameters
params = opt.step(cost, params)
if (i + 1) % 5 == 0:
print("Cost after step {:5d}: {: .7f}".format(i + 1, cost(params)))
print("Optimized rotation angles: {}".format(params))
## Hybrid
|
7933526993992b6dbe3a68a40032b82d8cb40b34
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/factory-ai-vision/EdgeSolution/modules/CVCaptureModule/stream_manager.py
|
c4673e41f5f6afdbdf9c2c1d79aa8b1421296923
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,568
|
py
|
stream_manager.py
|
import logging
import threading
import zmq
from streams import Stream
# FIXME RON
logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
class StreamManager(object):
def __init__(self):
self.streams = {}
self.mutex = threading.Lock()
self.context = None
self.sender = None
self._init_zmq()
def _init_zmq(self):
self.context = zmq.Context()
self.sender = self.context.socket(zmq.PUB)
self.sender.bind("tcp://*:5556")
def _add_new_stream(self, stream_id, rtsp, fps, endpoint):
""" internal function, no thread protect """
logger.info("Add new stream: %s", stream_id)
if stream_id in self.streams:
logger.warning("Stream %s already existed", stream_id)
return False
# FIXME RON check this
stream = Stream(stream_id, rtsp, fps, endpoint, self.sender)
self.streams[stream_id] = stream
def get_streams(self):
self.mutex.acquire()
streams = list(self.streams.values())
self.mutex.release()
return streams
def get_streams_num_danger(self):
return len(self.streams)
def get_streams_danger(self):
streams = list(self.streams.values())
return streams
def delete_stream(self, stream_id):
self._delete_stream_by_id(stream_id)
def add_stream(self, stream_id, rtsp, fps, endpoint):
self.mutex.acquire()
if stream_id in self.streams:
s = self.streams.get(stream_id, None)
if s.check_update(rtsp, fps, endpoint):
self._delete_stream_by_id(stream_id)
else:
print("nothing change")
self._add_new_stream(stream_id, rtsp, fps, endpoint)
self.mutex.release()
return "ok"
def update_streams(self, stream_ids):
self.mutex.acquire()
for stream in self.streams.values():
stream.reset_metrics()
origin_stream_ids = list(self.streams)
logger.info("==== Update Streams ====")
logger.info("origin: %s", origin_stream_ids)
logger.info("new : %s", stream_ids)
to_delete = []
to_update = []
for stream_id in origin_stream_ids:
if stream_id not in stream_ids:
to_delete.append(stream_id)
to_update.append(stream_id)
to_add = []
for stream_id in stream_ids:
if stream_id not in origin_stream_ids:
to_add.append(stream_id)
logger.info("To Delete : %s", to_delete)
logger.info("To Add : %s", to_add)
for stream_id in to_delete:
# FIXME
# Need to be deleted elegantly
self._delete_stream_by_id(stream_id)
for stream_id in to_add:
# FIXME: _add_new_stream also requires rtsp, fps and endpoint, which are
# not available in this method
self._add_new_stream(stream_id)
self.mutex.release()
def get_stream_by_id_danger(self, stream_id):
stream = self.streams.get(stream_id, None)
return stream
def get_stream_by_id(self, stream_id):
self.mutex.acquire()
if stream_id not in self.streams:
self.mutex.release()
logger.warning("Cannot find stream: %s", stream_id)
return None
stream = self.streams[stream_id]
self.mutex.release()
logger.info("Got stream: %s", stream_id)
return stream
def _delete_stream_by_id(self, stream_id):
""" internal function, no thread protect """
logger.info("Deleting stream: %s", stream_id)
if stream_id not in self.streams:
logger.warning("Cannot find stream: %s", stream_id)
return False
# FIXME need to fix this
# FIXME RON
self.streams[stream_id].delete()
del self.streams[stream_id]
logger.info("Deleted stream: %s", stream_id)
return True
def summary(self):
self.mutex.acquire()
logger.info("==== Stream Manager Summary ====")
for stream_id, stream in self.streams.items():
logger.info("Stream: %s", stream_id)
self.mutex.release()
if __name__ == "__main__":
class Stream:
def __init__(self, stream_id, model, sender):
self.model = model
self.sender = sender
def delete(self):
pass
sm = StreamManager()
sm.update_streams([1, 2])
sm.update_streams([2, 3])
sm.update_streams([1, 3])
sm.update_streams([1, 3])
sm.summary()
|
70b2e2a796ea19e5e56d71529672a31a547b9c95
|
9f73d653197b5218f1a5a02e06cb7f56d858a572
|
/pywikibot/data/memento.py
|
ed770fef9561737258b1bbcd9f022a44317bd1f4
|
[
"MIT"
] |
permissive
|
wikimedia/pywikibot
|
b32fbc2eb3d688f57668aed4dc488b4055196e8f
|
5c01e6bfcd328bc6eae643e661f1a0ae57612808
|
refs/heads/master
| 2023-09-03T19:22:13.926740
| 2023-09-03T14:56:01
| 2023-09-03T14:59:45
| 10,798,864
| 432
| 166
|
MIT
| 2023-08-10T23:36:48
| 2013-06-19T16:18:45
|
Python
|
UTF-8
|
Python
| false
| false
| 13,779
|
py
|
memento.py
|
"""Fix ups for memento-client package version 0.6.1.
.. versionadded:: 7.4
.. seealso:: https://github.com/mementoweb/py-memento-client#readme
"""
#
# (C) Shawn M. Jones, Harihar Shankar, Herbert Van de Sompel.
# -- Los Alamos National Laboratory, 2013
# Parts of MementoClient class codes are
# licensed under the BSD open source software license.
#
# (C) Pywikibot team, 2015-2023
#
# Distributed under the terms of the MIT license.
#
from datetime import datetime
from typing import Optional
import requests
from memento_client.memento_client import MementoClient as OldMementoClient
from memento_client.memento_client import MementoClientException
from requests.exceptions import InvalidSchema, MissingSchema
from pywikibot import config, debug, sleep, warning
__all__ = (
'MementoClient',
'MementoClientException',
'get_closest_memento_url',
)
class MementoClient(OldMementoClient):
"""A Memento Client.
It makes it as straightforward to access the Web of the past as it is
to access the current Web.
.. versionchanged:: 7.4
`timeout` is used in several methods.
Basic usage:
>>> mc = MementoClient()
>>> dt = mc.convert_to_datetime("Sun, 01 Apr 2010 12:00:00 GMT")
>>> mi = mc.get_memento_info("http://www.bbc.com/", dt, timeout=60)
>>> mi['original_uri']
'http://www.bbc.com/'
>>> mi['timegate_uri']
'http://timetravel.mementoweb.org/timegate/http://www.bbc.com/'
>>> sorted(mi['mementos'])
['closest', 'first', 'last', 'next', 'prev']
>>> from pprint import pprint
>>> pprint(mi['mementos'])
{'closest': {'datetime': datetime.datetime(2010, 5, 23, 10, 19, 6),
'http_status_code': 200,
'uri': ['https://web.archive.org/web/20100523101906/http://www.bbc.co.uk/']},
'first': {'datetime': datetime.datetime(1998, 12, 2, 21, 26, 10),
'uri': ['http://wayback.nli.org.il:8080/19981202212610/http://www.bbc.com/']},
'last': {'datetime': datetime.datetime(2022, 7, 31, 3, 30, 53),
'uri': ['http://archive.md/20220731033053/http://www.bbc.com/']},
'next': {'datetime': datetime.datetime(2010, 6, 2, 17, 29, 9),
'uri': ['http://wayback.archive-it.org/all/20100602172909/http://www.bbc.com/']},
'prev': {'datetime': datetime.datetime(2009, 10, 15, 19, 7, 5),
'uri': ['http://wayback.nli.org.il:8080/20091015190705/http://www.bbc.com/']}}
The output conforms to the Memento API format explained here:
http://timetravel.mementoweb.org/guide/api/#memento-json
.. note:: The mementos result is not deterministic. It may be
different for the same parameters.
By default, MementoClient uses the Memento Aggregator:
http://mementoweb.org/depot/
It is also possible to use a different TimeGate: simply initialize
with a preferred timegate base uri. Toggle check_native_timegate to
see if the original uri has its own timegate. The native timegate,
if found, will be used instead of the preferred timegate_uri. If no
native timegate is found, the preferred timegate_uri will be used.
:param str timegate_uri: A valid HTTP base uri for a timegate.
Must start with http(s):// and end with a /.
:param int max_redirects: the maximum number of redirects allowed
for all HTTP requests to be made.
:return: A :class:`MementoClient` obj.
""" # noqa: E501
def __init__(self, *args, **kwargs):
"""Initializer."""
# To prevent documentation inclusion from inherited class
# because it is malformed.
super().__init__(*args, **kwargs)
def get_memento_info(self, request_uri: str,
accept_datetime: Optional[datetime] = None,
timeout: Optional[int] = None,
**kwargs) -> dict:
"""Query the preferred timegate and return the closest memento uri.
Given an original uri and an accept datetime, this method
queries the preferred timegate and returns the closest memento
uri, along with prev/next/first/last if available.
.. seealso:: http://timetravel.mementoweb.org/guide/api/#memento-json
for the response format.
:param request_uri: The input http uri.
:param accept_datetime: The datetime object of the accept
datetime. The current datetime is used if none is provided.
:param timeout: the timeout value for the HTTP connection.
:return: A map of uri and datetime for the
closest/prev/next/first/last mementos.
"""
# for reading the headers of the req uri to find uri_r
req_uri_response = kwargs.get('req_uri_response')
# for checking native tg uri in uri_r
org_response = kwargs.get('org_response')
tg_response = kwargs.get('tg_response')
if not tg_response:
native_tg = None
original_uri = self.get_original_uri(
request_uri, response=req_uri_response)
if self.check_native_timegate:
native_tg = self.get_native_timegate_uri(
original_uri, accept_datetime=accept_datetime,
response=org_response)
timegate_uri = native_tg if native_tg \
else self.timegate_uri + original_uri
http_acc_dt = MementoClient.convert_to_http_datetime(
accept_datetime)
tg_response = MementoClient.request_head(
timegate_uri,
accept_datetime=http_acc_dt,
follow_redirects=True,
session=self.session,
timeout=timeout
)
return super().get_memento_info(request_uri,
accept_datetime=accept_datetime,
tg_response=tg_response,
**kwargs)
def get_native_timegate_uri(self,
original_uri: str,
accept_datetime: Optional[datetime],
timeout: Optional[int] = None,
**kwargs) -> Optional[str]:
"""Check the original uri whether the timegate uri is provided.
Given an original URL and an accept datetime, check the original uri
to see if the timegate uri is provided in the Link header.
:param original_uri: An HTTP uri of the original resource.
:param accept_datetime: The datetime object of the accept
datetime
:param timeout: the timeout value for the HTTP connection.
:return: The timegate uri of the original resource, if provided,
else None.
"""
org_response = kwargs.pop('response', None)
if not org_response:
try:
org_response = MementoClient.request_head(
original_uri,
accept_datetime=MementoClient.convert_to_http_datetime(
accept_datetime),
session=self.session,
timeout=timeout
)
except (requests.exceptions.ConnectTimeout,
requests.exceptions.ConnectionError): # pragma: no cover
warning('Could not connect to URI {}, returning no native '
'URI-G'.format(original_uri))
return None
debug('Request headers sent to search for URI-G: '
+ str(org_response.request.headers))
return super().get_native_timegate_uri(original_uri, accept_datetime,
response=org_response, **kwargs)
@staticmethod
def is_timegate(uri: str,
accept_datetime: Optional[str] = None,
response: Optional[requests.Response] = None,
session: Optional[requests.Session] = None,
timeout: Optional[int] = None) -> bool:
"""Checks if the given uri is a valid timegate according to the RFC.
:param uri: the http uri to check.
:param accept_datetime: the accept datetime string in http date
format.
:param response: the response object of the uri.
:param session: the requests session object.
:param timeout: the timeout value for the HTTP connection.
:return: True if a valid timegate, else False.
"""
if not response:
if not accept_datetime:
accept_datetime = MementoClient.convert_to_http_datetime(
datetime.now())
response = MementoClient.request_head(
uri,
accept_datetime=accept_datetime,
session=session,
timeout=timeout
)
return old_is_timegate(
uri, accept_datetime, response=response, session=session)
@staticmethod
def is_memento(uri: str,
response: Optional[requests.Response] = None,
session: Optional[requests.Session] = None,
timeout: Optional[int] = None) -> bool:
"""
Determines if the URI given is indeed a Memento.
The simple case is to look for a Memento-Datetime header in the
request, but not all archives are Memento-compliant yet.
:param uri: an HTTP URI for testing
:param response: the response object of the uri.
:param session: the requests session object.
:param timeout: (int) the timeout value for the HTTP connection.
:return: True if a Memento, False otherwise
"""
if not response:
response = MementoClient.request_head(uri,
follow_redirects=False,
session=session,
timeout=timeout)
return old_is_memento(uri, response=response)
@staticmethod
def convert_to_http_datetime(dt: Optional[datetime]) -> str:
"""Converts a datetime object to a date string in HTTP format.
:param dt: A datetime object.
:return: The date in HTTP format.
:raises TypeError: Expecting dt parameter to be of type datetime.
"""
if dt and not isinstance(dt, datetime):
raise TypeError(
'Expecting dt parameter to be of type datetime.')
return old_convert_to_http_datetime(dt)
@staticmethod
def request_head(uri: str,
accept_datetime: Optional[str] = None,
follow_redirects: bool = False,
session: Optional[requests.Session] = None,
timeout: Optional[int] = None) -> requests.Response:
"""Makes HEAD requests.
:param uri: the uri for the request.
:param accept_datetime: the accept-datetime in the http format.
:param follow_redirects: Toggle to follow redirects. False by
default, so does not follow any redirects.
:param session: the request session object to avoid opening new
connections for every request.
:param timeout: the timeout for the HTTP requests.
:return: the response object.
:raises ValueError: Only HTTP URIs are supported
"""
headers = {
'Accept-Datetime': accept_datetime} if accept_datetime else {}
# create a session if not supplied
session_set = False
if not session:
session = requests.Session()
session_set = True
try:
response = session.head(uri,
headers=headers,
allow_redirects=follow_redirects,
timeout=timeout or 9)
except (InvalidSchema, MissingSchema):
raise ValueError(
f'Only HTTP URIs are supported, URI {uri} unrecognized.')
if session_set:
session.close()
return response
# Save old static methods and update static methods of parent class
old_is_timegate = OldMementoClient.is_timegate
old_is_memento = OldMementoClient.is_memento
old_convert_to_http_datetime = OldMementoClient.convert_to_http_datetime
OldMementoClient.is_timegate = MementoClient.is_timegate
OldMementoClient.is_memento = MementoClient.is_memento
OldMementoClient.convert_to_http_datetime \
= MementoClient.convert_to_http_datetime
OldMementoClient.request_head = MementoClient.request_head
def get_closest_memento_url(url: str,
when: Optional[datetime] = None,
timegate_uri: Optional[str] = None):
"""Get most recent memento for url."""
if not when:
when = datetime.now()
mc = MementoClient()
if timegate_uri:
mc.timegate_uri = timegate_uri
retry_count = 0
while retry_count <= config.max_retries:
try:
memento_info = mc.get_memento_info(url, when)
break
except (requests.ConnectionError, MementoClientException) as e:
error = e
retry_count += 1
sleep(config.retry_wait)
else:
raise error
mementos = memento_info.get('mementos')
if not mementos:
err_msg = 'mementos not found for {} via {}'
elif 'closest' not in mementos:
err_msg = 'closest memento not found for {} via {}'
elif 'uri' not in mementos['closest']:
err_msg = 'closest memento uri not found for {} via {}'
else:
return mementos['closest']['uri'][0]
raise Exception(err_msg.format(url, timegate_uri))
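# A minimal usage sketch (assumes network access to the configured timegate;
# the returned URI is not deterministic):
#
#   url = get_closest_memento_url('http://www.bbc.com/',
#                                 when=datetime(2015, 6, 1))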
|
d7d00d30ec5b90d7939994f313f7a04afc9cdb53
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/ops-pkg/src/genie/libs/ops/ospf/iosxr/tests/ospf_output.py
|
f7492cba39f87b9128a52ca3831fd299d8a99c31
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 215,374
|
py
|
ospf_output.py
|
'''
OSPF Genie Ops Object Outputs for IOSXR.
'''
class OspfOutput(object):
############################################################################
# OSPF INFO OUTPUTS
############################################################################
# 'show protocols afi-all all'
ShowProtocolsAfiAllAll = {
'protocols':
{'bgp':
{'address_family':
{'vpnv4 unicast':
{'distance':
{'external': 20,
'internal': 200,
'local': 200},
'neighbors':
{'10.64.4.4':
{'gr_enable': 'No',
'last_update': '00:01:28',
'nsr_state': 'None'}}},
'vpnv6 unicast':
{'distance':
{'external': 20,
'internal': 200,
'local': 200},
'neighbors':
{'10.64.4.4':
{'gr_enable': 'No',
'last_update': '00:01:28',
'nsr_state': 'None'}}}},
'bgp_pid': 100,
'graceful_restart':
{'enable': False},
'nsr':
{'current_state': 'active ready',
'enable': True}},
'ospf':
{'vrf':
{'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'interfaces': ['Loopback0', 'GigabitEthernet0/0/0/0', 'GigabitEthernet0/0/0/2'],
'mpls':
{'te':
{'enable': True}}}},
'nsf': False,
'preference':
{'multi_values':
{'external': 114,
'granularity':
{'detail':
{'inter_area': 113,
'intra_area': 112}}},
'single_value':
{'all': 110}},
'redistribution':
{'bgp':
{'bgp_id': 100,
'metric': 111},
'connected':
{'enabled': True},
'isis':
{'isis_pid': '10',
'metric': 3333},
'static':
{'enabled': True,
'metric': 10}},
'router_id': '10.36.3.3'}}}}}}}}}
# 'show ospf vrf all-inclusive'
ShowOspfVrfAllInclusive = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'adjacency_stagger':
{'disable': False,
'initial_number': 2,
'maximum_number': 64,
'nbrs_forming': 0,
'nbrs_full': 1},
'areas':
{'0.0.0.1':
{'area_id': '0.0.0.1',
'area_type': 'stub',
'summary': True,
'default_cost': 111,
'ranges':
{'10.4.0.0/16':
{'advertise': True,
'prefix': '10.4.0.0/16'}},
'statistics':
{'area_scope_lsa_cksum_sum': '0x04f437',
'area_scope_lsa_count': 11,
'area_scope_opaque_lsa_cksum_sum': '00000000',
'area_scope_opaque_lsa_count': 0,
'dcbitless_lsa_count': 1,
'donotage_lsa_count': 0,
'flood_list_length': 0,
'indication_lsa_count': 0,
'interfaces_count': 2,
'lfa_interface_count': 0,
'lfa_per_prefix_interface_count': 0,
'lfa_revision': 0,
'nbrs_full': 1,
'nbrs_staggered_mode': 0,
'spf_runs_count': 79}}},
'database_control':
{'max_lsa': 123},
'external_flood_list_length': 0,
'flags':
{'abr': True,
'asbr': True},
'flood_pacing_interval_msec': 33,
'graceful_restart':
{'cisco':
{'enable': True,
'type': 'ietf'}},
'lsd_revision': 1,
'lsd_state': 'connected, registered, bound',
'maximum_interfaces': 1024,
'nsr':
{'enable': True},
'numbers':
{'dc_bitless': 0,
'do_not_age': 0,
'external_lsa': 0,
'external_lsa_checksum': '00000000',
'opaque_as_lsa': 0,
'opaque_as_lsa_checksum': '00000000'},
'redistribution':
{'bgp':
{'bgp_id': 100,
'metric': 111},
'connected':
{'enabled': True,
'metric': 10},
'isis':
{'isis_pid': '10',
'metric': 3333},
'max_prefix':
{'num_of_prefix': 4000,
'prefix_thld': 70,
'warn_only': False},
'static':
{'enabled': True}},
'retransmission_pacing_interval': 66,
'role': 'primary active',
'router_id': '10.36.3.3',
'segment_routing_global_block_default': '16000-23999',
'segment_routing_global_block_status': 'not allocated',
'snmp_trap': False,
'spf_control':
{'throttle':
{'lsa':
{'arrival': 100,
'hold': 200,
'interval': 200,
'maximum': 5000,
'refresh_interval': 1800,
'start': 50},
'spf':
{'hold': 200,
'maximum': 5000,
'start': 50}}},
'strict_spf': True,
'total_areas': 1,
'total_normal_areas': 1,
'total_nssa_areas': 0,
'total_stub_areas': 0,
'stub_router':
{'always':
{'always': False,
'external_lsa': False,
'include_stub': False,
'summary_lsa': False}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'adjacency_stagger':
{'disable': False,
'initial_number': 2,
'maximum_number': 64,
'nbrs_forming': 0,
'nbrs_full': 2},
'areas':
{'0.0.0.0':
{'area_type': 'normal',
'area_id': '0.0.0.0',
'rrr_enabled': True,
'statistics':
{'area_scope_lsa_cksum_sum': '0x0a2fb5',
'area_scope_lsa_count': 19,
'area_scope_opaque_lsa_cksum_sum': '00000000',
'area_scope_opaque_lsa_count': 0,
'dcbitless_lsa_count': 5,
'donotage_lsa_count': 0,
'flood_list_length': 0,
'indication_lsa_count': 0,
'interfaces_count': 3,
'lfa_interface_count': 0,
'lfa_per_prefix_interface_count': 0,
'lfa_revision': 0,
'nbrs_full': 2,
'nbrs_staggered_mode': 0,
'spf_runs_count': 26},
'topology_version': 15}},
'external_flood_list_length': 0,
'flood_pacing_interval_msec': 33,
'lsd_revision': 1,
'lsd_state': 'connected, registered, bound',
'maximum_interfaces': 1024,
'mpls':
{'ldp':
{'ldp_igp_sync': True,
'ldp_sync_status': 'not achieved'}},
'nsr':
{'enable': True},
'numbers':
{'dc_bitless': 0,
'do_not_age': 0,
'external_lsa': 1,
'external_lsa_checksum': '0x00607f',
'opaque_as_lsa': 0,
'opaque_as_lsa_checksum': '00000000'},
'retransmission_pacing_interval': 66,
'role': 'primary active',
'router_id': '10.36.3.3',
'segment_routing_global_block_default': '16000-23999',
'segment_routing_global_block_status': 'not allocated',
'snmp_trap': True,
'spf_control':
{'throttle':
{'lsa':
{'arrival': 100,
'hold': 200,
'interval': 200,
'maximum': 5000,
'refresh_interval': 1800,
'start': 50},
'spf':
{'hold': 200,
'maximum': 5000,
'start': 50}}},
'strict_spf': True,
'total_areas': 1,
'total_normal_areas': 1,
'total_nssa_areas': 0,
'total_stub_areas': 0,
'stub_router':
{'always':
{'always': True,
'external_lsa': True,
'external_lsa_metric': 16711680,
'include_stub': True,
'state': 'active',
'summary_lsa': True,
'summary_lsa_metric': 16711680},
'on_startup':
{'on_startup': 5,
'external_lsa': True,
'external_lsa_metric': 16711680,
'include_stub': True,
'state': 'inactive',
'summary_lsa': True,
'summary_lsa_metric': 16711680},
'on_switchover':
{'on_switchover': 10,
'external_lsa': True,
'external_lsa_metric': 16711680,
'include_stub': True,
'state': 'inactive',
'summary_lsa': True,
'summary_lsa_metric': 16711680}}}}}}}}}
# 'show ospf vrf all-inclusive'
ShowOspfVrfAllInclusive_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'adjacency_stagger':
{'disable': False,
'initial_number': 2,
'maximum_number': 64,
'nbrs_forming': 0,
'nbrs_full': 1},
'areas':
{'0.0.0.1':
{'area_id': '0.0.0.1',
'area_type': 'stub',
'summary': True,
'default_cost': 111,
'ranges':
{'10.4.0.0/16':
{'advertise': True,
'prefix': '10.4.0.0/16'}},
'statistics':
{'area_scope_lsa_cksum_sum': '0x04f437',
'area_scope_lsa_count': 11,
'area_scope_opaque_lsa_cksum_sum': '00000000',
'area_scope_opaque_lsa_count': 0,
'dcbitless_lsa_count': 1,
'donotage_lsa_count': 0,
'flood_list_length': 0,
'indication_lsa_count': 0,
'interfaces_count': 2,
'lfa_interface_count': 0,
'lfa_per_prefix_interface_count': 0,
'lfa_revision': 0,
'nbrs_full': 1,
'nbrs_staggered_mode': 0,
'spf_runs_count': 79}}},
'database_control':
{'max_lsa': 123},
'external_flood_list_length': 0,
'flags':
{'abr': True,
'asbr': True},
'flood_pacing_interval_msec': 33,
'graceful_restart':
{'cisco':
{'enable': True,
'type': 'ietf'}},
'lsd_revision': 1,
'lsd_state': 'connected, registered, bound',
'maximum_interfaces': 1024,
'nsr':
{'enable': True},
'numbers':
{'dc_bitless': 0,
'do_not_age': 0,
'external_lsa': 0,
'external_lsa_checksum': '00000000',
'opaque_as_lsa': 0,
'opaque_as_lsa_checksum': '00000000'},
'redistribution':
{'bgp':
{'bgp_id': 100,
'metric': 111},
'connected':
{'enabled': True,
'metric': 10},
'isis':
{'isis_pid': '10',
'metric': 3333},
'max_prefix':
{'num_of_prefix': 4000,
'prefix_thld': 70,
'warn_only': False},
'static':
{'enabled': True}},
'retransmission_pacing_interval': 66,
'role': 'primary active',
'router_id': '10.36.3.3',
'segment_routing_global_block_default': '16000-23999',
'segment_routing_global_block_status': 'not allocated',
'snmp_trap': False,
'spf_control':
{'throttle':
{'lsa':
{'arrival': 100,
'hold': 200,
'interval': 200,
'maximum': 5000,
'refresh_interval': 1800,
'start': 50},
'spf':
{'hold': 200,
'maximum': 5000,
'start': 50}}},
'strict_spf': True,
'total_areas': 1,
'total_normal_areas': 1,
'total_nssa_areas': 0,
'total_stub_areas': 0,
'stub_router':
{'always':
{'always': False,
'external_lsa': False,
'include_stub': False,
'summary_lsa': False}}}}}}},
}}
ShowOspfMplsTrafficEngLink = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'mpls':
{'te':
{'area_instance': 2,
'enable': True,
'total_links': 2}}}},
'mpls':
{'te':
{'router_id': '10.36.3.3'}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'mpls':
{'te':
{'area_instance': 2,
'enable': True,
'link_fragments':
{1:
{'affinity_bit': 0,
'extended_admin_groups':
{0: {'value': 0},
1: {'value': 0},
2: {'value': 0},
3: {'value': 0},
4: {'value': 0},
5: {'value': 0},
6: {'value': 0},
7: {'value': 0}},
'interface_address': '10.3.4.3',
'link_id': '10.3.4.4',
'link_instance': 2,
'maximum_bandwidth': 125000000,
'maximum_reservable_bandwidth': 93750000,
'network_type': 'broadcast',
'out_interface_id': 4,
'te_admin_metric': 1,
'total_extended_admin_group': 8,
'total_priority': 8,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}},
2:
{'affinity_bit': 0,
'extended_admin_groups':
{0: {'value': 0},
1: {'value': 0},
2: {'value': 0},
3: {'value': 0},
4: {'value': 0},
5: {'value': 0},
6: {'value': 0},
7: {'value': 0}},
'interface_address': '10.2.3.3',
'link_id': '10.2.3.3',
'link_instance': 2,
'maximum_bandwidth': 125000000,
'maximum_reservable_bandwidth': 93750000,
'network_type': 'broadcast',
'out_interface_id': 6,
'te_admin_metric': 1,
'total_extended_admin_group': 8,
'total_priority': 8,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}},
'total_links': 2}}}},
'mpls':
{'te':
{'router_id': '10.36.3.3'}}}}}}}}}
# 'show ospf vrf all-inclusive sham-links'
ShowOspfVrfAllInclusiveShamLinks = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'sham_links':
{'10.21.33.33 10.151.22.22':
{'cost': 111,
'dcbitless_lsa_count': 1,
'donotage_lsa': 'not allowed',
'dead_interval': 13,
'demand_circuit': True,
'hello_interval': 3,
'hello_timer': '00:00:00:772',
'if_index': 2,
'local_id': '10.21.33.33',
'name': 'SL0',
'link_state': 'up',
'remote_id': '10.151.22.22',
'retransmit_interval': 5,
'state': 'point-to-point,',
'transit_area_id': '0.0.0.1',
'transmit_delay': 7,
'wait_interval': 13}}}}}}}}}}}
# 'show ospf vrf all-inclusive virtual-links'
ShowOspfVrfAllInclusiveVirtualLinks = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'virtual_links':
{'0.0.0.1 10.16.2.2':
{'authentication':
{'auth_trailer_key':
{'crypto_algorithm': 'simple'}},
'cost': 65535,
'dcbitless_lsa_count': 1,
'donotage_lsa': 'not allowed',
'dead_interval': 16,
'demand_circuit': True,
'hello_interval': 4,
'hello_timer': '00:00:03:179',
'interface': 'GigabitEthernet0/0/0/3',
'name': 'VL0',
'link_state': 'up',
'nsf':
{'enable': True,
'last_restart': '00:18:16'},
'retransmit_interval': 44,
'router_id': '10.16.2.2',
'state': 'point-to-point,',
'transit_area_id': '0.0.0.1',
'transmit_delay': 5,
'wait_interval': 16}}}}}}}}}}}
# 'show ospf vrf all-inclusive database router'
ShowOspfVrfAllInclusiveDatabaseRouter = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{1:
{'lsa_type': 1,
'lsas':
{'10.229.11.11 10.229.11.11':
{'adv_router': '10.229.11.11',
'lsa_id': '10.229.11.11',
'ospfv2':
{'body':
{'router':
{'links':
{'10.186.5.1':
{'link_data': '10.186.5.1',
'link_id': '10.186.5.1',
'num_tos_metrics': 0,
'topologies':
{0:
{'metric': 1,
'mt_id': 0,
'tos': 0}},
'type': 'transit network'},
'10.151.22.22':
{'link_data': '0.0.0.14',
'link_id': '10.151.22.22',
'num_tos_metrics': 0,
'topologies':
{0:
{'metric': 111,
'mt_id': 0,
'tos': 0}},
'type': 'another router (point-to-point)'}},
'num_of_links': 2}},
'header':
{'adv_router': '10.229.11.11',
'age': 1713,
'area_border_router': True,
'as_boundary_router': True,
'checksum': '0x9ce3',
'length': 48,
'lsa_id': '10.229.11.11',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '8000003e',
'type': 1}}}}}}}}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'database':
{'lsa_types':
{1:
{'lsa_type': 1,
'lsas':
{'10.4.1.1 10.4.1.1':
{'adv_router': '10.4.1.1',
'lsa_id': '10.4.1.1',
'ospfv2':
{'body':
{'router':
{'links':
{'10.4.1.1':
{'link_data': '255.255.255.255',
'link_id': '10.4.1.1',
'num_tos_metrics': 0,
'topologies':
{0:
{'metric': 1,
'mt_id': 0,
'tos': 0}},
'type': 'stub network'},
'10.1.2.1':
{'link_data': '10.1.2.1',
'link_id': '10.1.2.1',
'num_tos_metrics': 0,
'topologies':
{0:
{'metric': 1,
'mt_id': 0,
'tos': 0}},
'type': 'transit network'},
'10.1.4.4':
{'link_data': '10.1.4.1',
'link_id': '10.1.4.4',
'num_tos_metrics': 0,
'topologies':
{0:
{'metric': 1,
'mt_id': 0,
'tos': 0}},
'type': 'transit network'}},
'num_of_links': 3}},
'header':
{'adv_router': '10.4.1.1',
'age': 1802,
'checksum': '0x6228',
'length': 60,
'lsa_id': '10.4.1.1',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '8000003d',
'type': 1}}}}}}}}}}}}}}}}
# 'show ospf vrf all-inclusive database external'
ShowOspfVrfAllInclusiveDatabaseExternal = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{5:
{'lsa_type': 5,
'lsas':
{'10.115.55.55 10.100.5.5':
{'adv_router': '10.100.5.5',
'lsa_id': '10.115.55.55',
'ospfv2':
{'body':
{'external':
{'network_mask': '255.255.255.255',
'topologies':
{0:
{'external_route_tag': 0,
'flags': 'E',
'forwarding_address': '0.0.0.0',
'metric': 20,
'mt_id': 0,
'tos': 0}}}},
'header':
{'adv_router': '10.100.5.5',
'age': 520,
'checksum': '0x7d61',
'length': 36,
'lsa_id': '10.115.55.55',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '90000006',
'type': 5}}}}}}}}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'database':
{'lsa_types':
{5:
{'lsa_type': 5,
'lsas':
{'10.94.44.44 10.64.4.4':
{'adv_router': '10.64.4.4',
'lsa_id': '10.94.44.44',
'ospfv2':
{'body':
{'external':
{'network_mask': '255.255.255.255',
'topologies':
{0:
{'external_route_tag': 0,
'flags': 'E',
'forwarding_address': '0.0.0.0',
'metric': 20,
'mt_id': 0,
'tos': 0}}}},
'header':
{'adv_router': '10.64.4.4',
'age': 608,
'checksum': '0x7d61',
'length': 36,
'lsa_id': '10.94.44.44',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '80000002',
'type': 5}}}}}}}}}}}}}}}}
# 'show ospf vrf all-inclusive database network'
ShowOspfVrfAllInclusiveDatabaseNetwork = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{2:
{'lsa_type': 2,
'lsas':
{'10.186.5.1 10.229.11.11':
{'adv_router': '10.229.11.11',
'lsa_id': '10.186.5.1',
'ospfv2':
{'body':
{'network':
{'attached_routers':
{'10.229.11.11': {},
'10.115.55.55': {}},
'network_mask': '255.255.255.0'}},
'header':
{'adv_router': '10.229.11.11',
'age': 522,
'checksum': '0xddd9',
'length': 32,
'lsa_id': '10.186.5.1',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '80000033',
'type': 2}}}}}}}}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'database':
{'lsa_types':
{2:
{'lsa_type': 2,
'lsas':
{'10.1.2.1 10.4.1.1':
{'adv_router': '10.4.1.1',
'lsa_id': '10.1.2.1',
'ospfv2':
{'body':
{'network':
{'attached_routers':
{'10.4.1.1': {},
'10.16.2.2': {}},
'network_mask': '255.255.255.0'}},
'header':
{'adv_router': '10.4.1.1',
'age': 1844,
'checksum': '0x3dd0',
'length': 32,
'lsa_id': '10.1.2.1',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '8000000f',
'type': 2}}}}}}}}}}}}}}}}
# 'show ospf vrf all-inclusive database summary'
ShowOspfVrfAllInclusiveDatabaseSummary = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{3:
{'lsa_type': 3,
'lsas':
{'10.186.4.0 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.186.4.0',
'ospfv2':
{'body':
{'summary':
{'network_mask': '255.255.255.0',
'topologies':
{0:
{'metric': 75565,
'mt_id': 0,
'tos': 0}}}},
'header':
{'adv_router': '10.36.3.3',
'age': 608,
'checksum': '0xaa4a',
'length': 28,
'lsa_id': '10.186.4.0',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'seq_num': '90000001',
'type': 3}}}}}}}}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'database':
{'lsa_types':
{3:
{'lsa_type': 3,
'lsas':
{'10.186.3.0 10.16.2.2':
{'adv_router': '10.16.2.2',
'lsa_id': '10.186.3.0',
'ospfv2':
{'body':
{'summary':
{'network_mask': '255.255.255.0',
'topologies':
{0:
{'metric': 65575,
'mt_id': 0,
'tos': 0}}}},
'header':
{'adv_router': '10.16.2.2',
'age': 520,
'checksum': '0xaa4a',
'length': 28,
'lsa_id': '10.186.3.0',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'seq_num': '80000001',
'type': 3}}}}}}}}}}}}}}}}
# 'show ospf vrf all-inclusive database opaque-area'
ShowOspfVrfAllInclusiveDatabaseOpaqueArea = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{10:
{'lsa_type': 10,
'lsas':
{'10.1.0.7 10.16.2.2':
{'adv_router': '10.16.2.2',
'lsa_id': '10.1.0.7',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'igp_metric': 1,
'link_id': '10.3.2.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.3.2.2': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'total_priority': 8,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.16.2.2',
'age': 420,
'checksum': '0x5ec',
'length': 160,
'lsa_id': '10.1.0.7',
'opaque_id': 6,
'opaque_type': 1,
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'seq_num': '90000002',
'type': 10}}}}}}}}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'database':
{'lsa_types':
{10:
{'lsa_type': 10,
'lsas':
{'10.1.0.6 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.1.0.6',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0',
'extended_admin_group':
{'groups':
{0: {'value': 0},
1: {'value': 0},
2: {'value': 0},
3: {'value': 0},
4: {'value': 0},
5: {'value': 0},
6: {'value': 0},
7: {'value': 0}},
'length': 8},
'igp_metric': 1,
'link_id': '10.2.3.3',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.2.3.3': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'total_priority': 8,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.36.3.3',
'age': 1175,
'checksum': '0x5ec',
'length': 160,
'lsa_id': '10.1.0.6',
'opaque_id': 6,
'opaque_type': 1,
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'seq_num': '80000002',
'type': 10}}}}}}}}}}}}}}}}
# 'show ospf vrf all-inclusive interface'
ShowOspfVrfAllInclusiveInterface = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'interfaces':
{'GigabitEthernet0/0/0/1':
{'bdr_ip_addr': '10.19.7.3',
'bdr_router_id': '10.36.3.3',
'bfd':
{'enable': True,
'interval': 12345,
'mode': 'default',
'multiplier': 50},
'cost': 1,
'dead_interval': 40,
'demand_circuit': False,
'dr_ip_addr': '10.19.7.7',
'dr_router_id': '10.1.77.77',
'enable': True,
'flood_queue_length': 0,
'hello_timer': '00:00:03:040',
'hello_interval': 10,
'index': '1/1',
'interface_type': 'broadcast',
'ip_address': '10.19.7.3/24',
'last_flood_scan_length': 1,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'high_water_mark': 11,
'max_flood_scan_length': 5,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 1500,
'mtu': 1500,
'name': 'GigabitEthernet0/0/0/1',
'next': '0(0)/0(0)',
'passive': False,
'priority': 1,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.36.3.3',
'state': 'bdr',
'transmit_delay': 1,
'wait_interval': 40,
'statistics':
{'adj_nbr_count': 1,
'nbr_count': 1,
'num_nbrs_suppress_hello': 0,
'multi_area_intf_count': 0,
},
'neighbors':
{'10.1.77.77':
{'dr_router_id': '10.1.77.77'},
},
}},
'virtual_links':
{'0.0.0.1 10.16.2.2':
{'bfd':
{'enable': False},
'cost': 1,
'dead_interval': 40,
'demand_circuit': True,
'donotage_lsa': False,
'enable': False,
'flood_queue_length': 0,
'hello_timer': '00:00:01:281',
'hello_interval': 10,
'high_water_mark': 20,
'index': '4/7',
'interface_type': 'virtual-link',
'ip_address': '0.0.0.0/0',
'last_flood_scan_length': 7,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'max_flood_scan_length': 7,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 1500,
'mtu': 0,
'name': 'VL0',
'next': '0(0)/0(0)',
'passive': False,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.16.2.2',
'state': 'point-to-point',
'transmit_delay': 1,
'wait_interval': 40,
'total_dcbitless_lsa': 7,
'neighbors':
{'10.64.4.4': {},
},
'statistics':
{'adj_nbr_count': 1,
'nbr_count': 1,
'num_nbrs_suppress_hello': 1,
'multi_area_intf_count': 0,
}}}},
'sham_links':
{'10.21.33.33 10.151.22.22':
{'bfd':
{'enable': False},
'cost': 111,
'dead_interval': 13,
'demand_circuit': True,
'donotage_lsa': False,
'enable': False,
'flood_queue_length': 0,
'hello_timer': '00:00:00:864',
'hello_interval': 3,
'high_water_mark': 9,
'index': '2/2',
'interface_type': 'sham-link',
'ip_address': '0.0.0.0/0',
'last_flood_scan_length': 1,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'max_flood_scan_length': 7,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 1500,
'mtu': 0,
'name': 'SL0',
'next': '0(0)/0(0)',
'passive': False,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.36.3.3',
'state': 'point-to-point',
'transmit_delay': 7,
'wait_interval': 13,
'statistics':
{'adj_nbr_count': 0,
'nbr_count': 0,
'num_nbrs_suppress_hello': 0,
'multi_area_intf_count': 0,
},
'total_dcbitless_lsa': 1,
}}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'interfaces':
{'GigabitEthernet0/0/0/0':
{'bdr_ip_addr': '10.3.4.3',
'bdr_router_id': '10.36.3.3',
'bfd':
{'enable': False},
'cost': 1,
'dead_interval': 40,
'demand_circuit': False,
'dr_ip_addr': '10.3.4.4',
'dr_router_id': '10.64.4.4',
'enable': True,
'flood_queue_length': 0,
'hello_timer': '00:00:07:171',
'hello_interval': 10,
'index': '1/1',
'interface_type': 'broadcast',
'ip_address': '10.3.4.3/24',
'last_flood_scan_length': 1,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'high_water_mark': 5,
'max_flood_scan_length': 3,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 1500,
'mtu': 1500,
'name': 'GigabitEthernet0/0/0/0',
'next': '0(0)/0(0)',
'passive': False,
'priority': 1,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.36.3.3',
'state': 'bdr',
'transmit_delay': 1,
'wait_interval': 40,
'statistics':
{'adj_nbr_count': 1,
'nbr_count': 1,
'num_nbrs_suppress_hello': 0,
'multi_area_intf_count': 0,
},
'neighbors':
{'10.64.4.4':
{'dr_router_id': '10.64.4.4'},
},
},
'GigabitEthernet0/0/0/2':
{'bdr_router_id': '10.16.2.2',
'bdr_ip_addr': '10.2.3.2',
'bfd':
{'enable': False},
'cost': 1,
'dead_interval': 40,
'demand_circuit': False,
'dr_ip_addr': '10.2.3.3',
'dr_router_id': '10.36.3.3',
'enable': True,
'flood_queue_length': 0,
'hello_timer': '00:00:07:587',
'hello_interval': 10,
'index': '2/2',
'interface_type': 'broadcast',
'ip_address': '10.2.3.3/24',
'last_flood_scan_length': 1,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'high_water_mark': 7,
'max_flood_scan_length': 3,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 1500,
'mtu': 1500,
'name': 'GigabitEthernet0/0/0/2',
'next': '0(0)/0(0)',
'passive': False,
'priority': 1,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.36.3.3',
'state': 'dr',
'transmit_delay': 1,
'wait_interval': 40,
'statistics':
{'nbr_count': 1,
'adj_nbr_count': 1,
'multi_area_intf_count': 0,
'num_nbrs_suppress_hello': 0,
},
'neighbors':
{'10.16.2.2':
{'bdr_router_id': '10.16.2.2'},
},
},
'Loopback0':
{'bfd':
{'enable': False},
'cost': 1,
'demand_circuit': False,
'enable': True,
'interface_type': 'loopback',
'ip_address': '10.36.3.3/32',
'line_protocol': True,
'name': 'Loopback0',
'process_id': '1',
'router_id': '10.36.3.3'},
'tunnel-te31':
{'bfd':
{'enable': False},
'dead_interval': 40,
'demand_circuit': False,
'enable': True,
'flood_queue_length': 0,
'hello_interval': 10,
'index': '0/0',
'interface_type': 'point-to-point',
'ip_address': '0.0.0.0/0',
'last_flood_scan_length': 0,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'high_water_mark': 0,
'max_flood_scan_length': 0,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 576,
'mtu': 0,
'name': 'tunnel-te31',
'next': '0(0)/0(0)',
'passive': True,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.36.3.3',
'state': 'point-to-point',
'transmit_delay': 1,
'wait_interval': 0,
'statistics':
{'adj_nbr_count': 0,
'multi_area_intf_count': 0,
'nbr_count': 0,
'num_nbrs_suppress_hello': 0}}}}}}}}}}}}
# 'show ospf vrf all-inclusive neighbor detail'
ShowOspfVrfAllInclusiveNeighborDetail = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'total_neighbor_count': 2,
'virtual_links':
{'0.0.0.1 10.16.2.2':
{'neighbors':
{'10.16.2.2':
{'address': '10.229.4.4',
'bdr_ip_addr': '0.0.0.0',
'dead_timer': '00:00:21',
'dr_ip_addr': '0.0.0.0',
'first': '0(0)/0(0)',
'high_water_mark': 0,
'index': '1/3,',
'lls_options': '0x1 (LR)',
'ls_ack_list': 'NSR-sync',
'ls_ack_list_pending': 0,
'neighbor_router_id': '10.16.2.2',
'neighbor_uptime': '04:58:24',
'next': '0(0)/0(0)',
'options': '0x72',
'priority': 1,
'state': 'full',
'statistics':
{'nbr_event_count': 7,
'nbr_retrans_qlen': 0,
'total_retransmission': 0,
'total_dbd_retrans': 0,
'last_retrans_max_scan_length': 0,
'last_retrans_max_scan_time_msec': 0,
'last_retrans_scan_length': 0,
'last_retrans_scan_time_msec': 0}}}}},
'interfaces':
{'GigabitEthernet0/0/0/1':
{'neighbors':
{'10.36.3.3':
{'address': '10.229.3.3',
'bdr_ip_addr': '10.229.3.2',
'dead_timer': '00:00:31',
'dr_ip_addr': '10.229.3.3',
'first': '0(0)/0(0)',
'high_water_mark': 0,
'index': '2/2,',
'ls_ack_list': 'NSR-sync',
'ls_ack_list_pending': 0,
'neighbor_router_id': '10.36.3.3',
'neighbor_uptime': '05:00:13',
'next': '0(0)/0(0)',
'options': '0x42',
'priority': 1,
'state': 'full',
'statistics':
{'nbr_event_count': 6,
'nbr_retrans_qlen': 0,
'total_retransmission': 2,
'total_dbd_retrans': 0,
'last_retrans_max_scan_length': 1,
'last_retrans_max_scan_time_msec': 0,
'last_retrans_scan_length': 1,
'last_retrans_scan_time_msec': 0}}}}}}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'total_neighbor_count': 1,
'interfaces':
{'GigabitEthernet0/0/0/0':
{'neighbors':
{'10.64.4.4':
{'address': '10.229.4.4',
'bdr_ip_addr': '10.229.4.2',
'dead_timer': '00:00:32',
'dr_ip_addr': '10.229.4.4',
'first': '0(0)/0(0)',
'high_water_mark': 0,
'index': '1/1,',
'lls_options': '0x1 (LR)',
'ls_ack_list': 'NSR-sync',
'ls_ack_list_pending': 0,
'neighbor_router_id': '10.64.4.4',
'neighbor_uptime': '05:00:21',
'next': '0(0)/0(0)',
'options': '0x52',
'priority': 1,
'state': 'full',
'statistics':
{'nbr_event_count': 6,
'nbr_retrans_qlen': 0,
'total_retransmission': 0,
'total_dbd_retrans': 0,
'last_retrans_max_scan_length': 0,
'last_retrans_max_scan_time_msec': 0,
'last_retrans_scan_length': 0,
'last_retrans_scan_time_msec': 0}}}},
'GigabitEthernet0/0/0/2':
{'neighbors':
{'10.144.6.6':
{'address': '10.229.4.4',
'bdr_ip_addr': '10.229.4.2',
'dead_timer': '00:00:32',
'dr_ip_addr': '10.229.4.4',
'first': '0(0)/0(0)',
'high_water_mark': 0,
'index': '1/1,',
'lls_options': '0x1 (LR)',
'ls_ack_list': 'NSR-sync',
'ls_ack_list_pending': 0,
'neighbor_router_id': '10.144.6.6',
'neighbor_uptime': '05:00:21',
'next': '0(0)/0(0)',
'options': '0x52',
'priority': 1,
'state': 'full',
'statistics':
{'nbr_event_count': 6,
'nbr_retrans_qlen': 0,
'total_retransmission': 0,
'total_dbd_retrans': 0,
'last_retrans_max_scan_length': 0,
'last_retrans_max_scan_time_msec': 0,
'last_retrans_scan_length': 0,
'last_retrans_scan_time_msec': 0}}}}}}}}}}}}}}
# 'show ospf vrf all-inclusive database router'
ShowOspfVrfAllInclusiveDatabaseRouter_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{1:
{'lsa_type': 1,
'lsas':
{'10.229.11.11 10.229.11.11':
{'adv_router': '10.229.11.11',
'lsa_id': '10.229.11.11',
'ospfv2':
{'body':
{'router':
{'links':
{'10.186.5.1':
{'link_data': '10.186.5.1',
'link_id': '10.186.5.1',
'num_tos_metrics': 0,
'topologies':
{0:
{'metric': 1,
'mt_id': 0,
'tos': 0}},
'type': 'transit network'},
'10.151.22.22':
{'link_data': '0.0.0.14',
'link_id': '10.151.22.22',
'num_tos_metrics': 0,
'topologies':
{0:
{'metric': 111,
'mt_id': 0,
'tos': 0}},
'type': 'another router (point-to-point)'}},
'num_of_links': 2}},
'header':
{'adv_router': '10.229.11.11',
'age': 1713,
'area_border_router': True,
'as_boundary_router': True,
'checksum': '0x9ce3',
'length': 48,
'lsa_id': '10.229.11.11',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '8000003e',
'type': 1}}}}}}}}}}}}}},
}}
# 'show ospf vrf all-inclusive database external'
ShowOspfVrfAllInclusiveDatabaseExternal_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{5:
{'lsa_type': 5,
'lsas':
{'10.115.55.55 10.100.5.5':
{'adv_router': '10.100.5.5',
'lsa_id': '10.115.55.55',
'ospfv2':
{'body':
{'external':
{'network_mask': '255.255.255.255',
'topologies':
{0:
{'external_route_tag': 0,
'flags': 'E',
'forwarding_address': '0.0.0.0',
'metric': 20,
'mt_id': 0,
'tos': 0}}}},
'header':
{'adv_router': '10.100.5.5',
'age': 520,
'checksum': '0x7d61',
'length': 36,
'lsa_id': '10.115.55.55',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '90000006',
'type': 5}}}}}}}}}}}}}},
}}
# 'show ospf vrf all-inclusive database network'
ShowOspfVrfAllInclusiveDatabaseNetwork_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{2:
{'lsa_type': 2,
'lsas':
{'10.186.5.1 10.229.11.11':
{'adv_router': '10.229.11.11',
'lsa_id': '10.186.5.1',
'ospfv2':
{'body':
{'network':
{'attached_routers':
{'10.229.11.11': {},
'10.115.55.55': {}},
'network_mask': '255.255.255.0'}},
'header':
{'adv_router': '10.229.11.11',
'age': 522,
'checksum': '0xddd9',
'length': 32,
'lsa_id': '10.186.5.1',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'routing_bit_enable': True,
'seq_num': '80000033',
'type': 2}}}}}}}}}}}}}},
}}
# 'show ospf vrf all-inclusive database summary'
ShowOspfVrfAllInclusiveDatabaseSummary_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{3:
{'lsa_type': 3,
'lsas':
{'10.186.4.0 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.186.4.0',
'ospfv2':
{'body':
{'summary':
{'network_mask': '255.255.255.0',
'topologies':
{0:
{'metric': 75565,
'mt_id': 0,
'tos': 0}}}},
'header':
{'adv_router': '10.36.3.3',
'age': 608,
'checksum': '0xaa4a',
'length': 28,
'lsa_id': '10.186.4.0',
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'seq_num': '90000001',
'type': 3}}}}}}}}}}}}}},
}}
# 'show ospf vrf all-inclusive database opaque-area'
ShowOspfVrfAllInclusiveDatabaseOpaqueArea_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'database':
{'lsa_types':
{10:
{'lsa_type': 10,
'lsas':
{'10.1.0.7 10.16.2.2':
{'adv_router': '10.16.2.2',
'lsa_id': '10.1.0.7',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'igp_metric': 1,
'link_id': '10.3.2.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.3.2.2': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'total_priority': 8,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.16.2.2',
'age': 420,
'checksum': '0x5ec',
'length': 160,
'lsa_id': '10.1.0.7',
'opaque_id': 6,
'opaque_type': 1,
'option': 'None',
'option_desc': 'No TOS-capability, DC',
'seq_num': '90000002',
'type': 10}}}}}}}}}}}}}},
}}
# 'show ospf vrf all-inclusive interface'
ShowOspfVrfAllInclusiveInterface_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'interfaces':
{'GigabitEthernet0/0/0/1':
{'bdr_ip_addr': '10.19.7.3',
'bdr_router_id': '10.36.3.3',
'bfd':
{'enable': True,
'interval': 12345,
'mode': 'default',
'multiplier': 50},
'cost': 1,
'dead_interval': 40,
'demand_circuit': False,
'dr_ip_addr': '10.19.7.7',
'dr_router_id': '10.1.77.77',
'enable': True,
'flood_queue_length': 0,
'hello_timer': '00:00:03:040',
'hello_interval': 10,
'index': '1/1',
'interface_type': 'broadcast',
'ip_address': '10.19.7.3/24',
'last_flood_scan_length': 1,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'high_water_mark': 11,
'max_flood_scan_length': 5,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 1500,
'mtu': 1500,
'name': 'GigabitEthernet0/0/0/1',
'next': '0(0)/0(0)',
'passive': False,
'priority': 1,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.36.3.3',
'state': 'bdr',
'transmit_delay': 1,
'wait_interval': 40,
'statistics':
{'adj_nbr_count': 1,
'nbr_count': 1,
'num_nbrs_suppress_hello': 0,
'multi_area_intf_count': 0,
},
}},
'virtual_links':
{'0.0.0.1 10.16.2.2':
{'bfd':
{'enable': False},
'cost': 1,
'dead_interval': 40,
'demand_circuit': True,
'donotage_lsa': False,
'enable': False,
'flood_queue_length': 0,
'hello_timer': '00:00:01:281',
'hello_interval': 10,
'high_water_mark': 20,
'index': '4/7',
'interface_type': 'virtual-link',
'ip_address': '0.0.0.0/0',
'last_flood_scan_length': 7,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'max_flood_scan_length': 7,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 1500,
'mtu': 0,
'name': 'VL0',
'next': '0(0)/0(0)',
'passive': False,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.16.2.2',
'state': 'point-to-point',
'transmit_delay': 1,
'wait_interval': 40,
'total_dcbitless_lsa': 7,
'neighbors':
{'10.64.4.4': {},
},
'statistics':
{'adj_nbr_count': 1,
'nbr_count': 1,
'num_nbrs_suppress_hello': 1,
'multi_area_intf_count': 0,
}}}},
'sham_links':
{'10.21.33.33 10.151.22.22':
{'bfd':
{'enable': False},
'cost': 111,
'dead_interval': 13,
'demand_circuit': True,
'donotage_lsa': False,
'enable': False,
'flood_queue_length': 0,
'hello_timer': '00:00:00:864',
'hello_interval': 3,
'high_water_mark': 9,
'index': '2/2',
'interface_type': 'sham-link',
'ip_address': '0.0.0.0/0',
'last_flood_scan_length': 1,
'last_flood_scan_time_msec': 0,
'line_protocol': True,
'ls_ack_list': 'current',
'ls_ack_list_length': 0,
'max_flood_scan_length': 7,
'max_flood_scan_time_msec': 0,
'max_pkt_sz': 1500,
'mtu': 0,
'name': 'SL0',
'next': '0(0)/0(0)',
'passive': False,
'process_id': '1',
'retransmit_interval': 5,
'router_id': '10.36.3.3',
'state': 'point-to-point',
'transmit_delay': 7,
'wait_interval': 13,
'statistics':
{'adj_nbr_count': 0,
'nbr_count': 0,
'num_nbrs_suppress_hello': 0,
'multi_area_intf_count': 0,
},
'total_dcbitless_lsa': 1,
}}}}}}}},
}}
# 'show ospf vrf all-inclusive neighbor detail'
ShowOspfVrfAllInclusiveNeighborDetail_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.1':
{'total_neighbor_count': 2,
'virtual_links':
{'0.0.0.1 10.16.2.2':
{'neighbors':
{'10.16.2.2':
{'address': '10.229.4.4',
'bdr_ip_addr': '0.0.0.0',
'dead_timer': '00:00:21',
'dr_ip_addr': '0.0.0.0',
'first': '0(0)/0(0)',
'high_water_mark': 0,
'index': '1/3,',
'lls_options': '0x1 (LR)',
'ls_ack_list': 'NSR-sync',
'ls_ack_list_pending': 0,
'neighbor_router_id': '10.16.2.2',
'neighbor_uptime': '04:58:24',
'next': '0(0)/0(0)',
'options': '0x72',
'priority': 1,
'state': 'full',
'statistics':
{'nbr_event_count': 7,
'nbr_retrans_qlen': 0,
'total_retransmission': 0,
'total_dbd_retrans': 0,
'last_retrans_max_scan_length': 0,
'last_retrans_max_scan_time_msec': 0,
'last_retrans_scan_length': 0,
'last_retrans_scan_time_msec': 0}}}}},
'interfaces':
{'GigabitEthernet0/0/0/1':
{'neighbors':
{'10.36.3.3':
{'address': '10.229.3.3',
'bdr_ip_addr': '10.229.3.2',
'dead_timer': '00:00:31',
'dr_ip_addr': '10.229.3.3',
'first': '0(0)/0(0)',
'high_water_mark': 0,
'index': '2/2,',
'ls_ack_list': 'NSR-sync',
'ls_ack_list_pending': 0,
'neighbor_router_id': '10.36.3.3',
'neighbor_uptime': '05:00:13',
'next': '0(0)/0(0)',
'options': '0x42',
'priority': 1,
'state': 'full',
'statistics':
{'nbr_event_count': 6,
'nbr_retrans_qlen': 0,
'total_retransmission': 2,
'total_dbd_retrans': 0,
'last_retrans_max_scan_length': 1,
'last_retrans_max_scan_time_msec': 0,
'last_retrans_scan_length': 1,
'last_retrans_scan_time_msec': 0}}}}}}}}}}}},
}}
############################################################################
# OSPF INFO
############################################################################
OspfInfo = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'adjacency_stagger':
{'disable': False,
'initial_number': 2,
'maximum_number': 64},
'areas':
{'0.0.0.1':
{'area_id': '0.0.0.1',
'area_type': 'stub',
'database':
{'lsa_types':
{1:
{'lsa_type': 1,
'lsas':
{'10.229.11.11 10.229.11.11':
{'adv_router': '10.229.11.11',
'lsa_id': '10.229.11.11',
'ospfv2':
{'body':
{'router':
{'links':
{'10.186.5.1':
{'link_data': '10.186.5.1',
'link_id': '10.186.5.1',
'topologies':
{0:
{'metric': 1,
'mt_id': 0}},
'type': 'transit network'},
'10.151.22.22':
{'link_data': '0.0.0.14',
'link_id': '10.151.22.22',
'topologies':
{0:
{'metric': 111,
'mt_id': 0}},
'type': 'another router (point-to-point)'}},
'num_of_links': 2}},
'header':
{'adv_router': '10.229.11.11',
'age': 1713,
'checksum': '0x9ce3',
'length': 48,
'lsa_id': '10.229.11.11',
'option': 'None',
'seq_num': '8000003e',
'type': 1}}}}},
2:
{'lsa_type': 2,
'lsas':
{'10.186.5.1 10.229.11.11':
{'adv_router': '10.229.11.11',
'lsa_id': '10.186.5.1',
'ospfv2':
{'body':
{'network':
{'attached_routers':
{'10.229.11.11': {},
'10.115.55.55': {}},
'network_mask': '255.255.255.0'}},
'header':
{'adv_router': '10.229.11.11',
'age': 522,
'checksum': '0xddd9',
'length': 32,
'lsa_id': '10.186.5.1',
'option': 'None',
'seq_num': '80000033',
'type': 2}}}}},
3:
{'lsa_type': 3,
'lsas':
{'10.186.4.0 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.186.4.0',
'ospfv2':
{'body':
{'summary':
{'network_mask': '255.255.255.0',
'topologies':
{0:
{'metric': 75565,
'mt_id': 0}}}},
'header':
{'adv_router': '10.36.3.3',
'age': 608,
'checksum': '0xaa4a',
'length': 28,
'lsa_id': '10.186.4.0',
'option': 'None',
'seq_num': '90000001',
'type': 3}}}}},
5:
{'lsa_type': 5,
'lsas':
{'10.115.55.55 10.100.5.5':
{'adv_router': '10.100.5.5',
'lsa_id': '10.115.55.55',
'ospfv2':
{'body':
{'external':
{'network_mask': '255.255.255.255',
'topologies':
{0:
{'external_route_tag': 0,
'flags': 'E',
'forwarding_address': '0.0.0.0',
'metric': 20,
'mt_id': 0}}}},
'header':
{'adv_router': '10.100.5.5',
'age': 520,
'checksum': '0x7d61',
'length': 36,
'lsa_id': '10.115.55.55',
'option': 'None',
'seq_num': '90000006',
'type': 5}}}}},
10:
{'lsa_type': 10,
'lsas':
{'10.1.0.7 10.16.2.2':
{'adv_router': '10.16.2.2',
'lsa_id': '10.1.0.7',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'link_id': '10.3.2.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.3.2.2': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.16.2.2',
'age': 420,
'checksum': '0x5ec',
'length': 160,
'lsa_id': '10.1.0.7',
'opaque_id': 6,
'opaque_type': 1,
'option': 'None',
'seq_num': '90000002',
'type': 10}}}}}}},
'default_cost': 111,
'interfaces':
{'GigabitEthernet0/0/0/1':
{'bdr_ip_addr': '10.19.7.3',
'bdr_router_id': '10.36.3.3',
'bfd':
{'enable': True,
'interval': 12345,
'multiplier': 50},
'cost': 1,
'dead_interval': 40,
'demand_circuit': False,
'dr_ip_addr': '10.19.7.7',
'dr_router_id': '10.1.77.77',
'enable': True,
'hello_interval': 10,
'hello_timer': '00:00:03:040',
'interface_type': 'broadcast',
'name': 'GigabitEthernet0/0/0/1',
'neighbors':
{'10.36.3.3':
{'neighbor_router_id': '10.36.3.3',
'address': '10.229.3.3',
'bdr_ip_addr': '10.229.3.2',
'dead_timer': '00:00:31',
'dr_ip_addr': '10.229.3.3',
'state': 'full',
'statistics':
{'nbr_event_count': 6,
'nbr_retrans_qlen': 0}}},
'passive': False,
'priority': 1,
'retransmit_interval': 5,
'state': 'bdr',
'transmit_delay': 1}},
'mpls':
{'te':
{'enable': True}},
'ranges':
{'10.4.0.0/16':
{'advertise': True,
'prefix': '10.4.0.0/16'}},
'sham_links':
{'10.21.33.33 10.151.22.22':
{'cost': 111,
'dead_interval': 13,
'demand_circuit': True,
'hello_interval': 3,
'hello_timer': '00:00:00:772',
'local_id': '10.21.33.33',
'name': 'SL0',
'remote_id': '10.151.22.22',
'retransmit_interval': 5,
'state': 'point-to-point,',
'transit_area_id': '0.0.0.1',
'transmit_delay': 7}},
'statistics':
{'area_scope_lsa_cksum_sum': '0x04f437',
'area_scope_lsa_count': 11,
'spf_runs_count': 79},
'summary': True,
'virtual_links':
{'0.0.0.1 10.16.2.2':
{'authentication':
{'auth_trailer_key':
{'crypto_algorithm': 'simple'}},
'cost': 65535,
'dead_interval': 16,
'demand_circuit': True,
'hello_interval': 4,
'hello_timer': '00:00:03:179',
'name': 'VL0',
'neighbors':
{'10.16.2.2':
{'neighbor_router_id': '10.16.2.2',
'address': '10.229.4.4',
'bdr_ip_addr': '0.0.0.0',
'dead_timer': '00:00:21',
'dr_ip_addr': '0.0.0.0',
'state': 'full',
'statistics':
{'nbr_event_count': 7,
'nbr_retrans_qlen': 0}}},
'retransmit_interval': 44,
'router_id': '10.16.2.2',
'state': 'point-to-point,',
'transit_area_id': '0.0.0.1',
'transmit_delay': 5}}}},
'database_control':
{'max_lsa': 123},
'graceful_restart':
{'cisco':
{'enable': True,
'type': 'ietf'}},
'maximum_interfaces': 1024,
'mpls':
{'te':
{'router_id': '10.36.3.3'}},
'nsr':
{'enable': True},
'redistribution':
{'bgp':
{'bgp_id': 100,
'metric': 111},
'connected':
{'enabled': True,
'metric': 10},
'isis':
{'isis_pid': '10',
'metric': 3333},
'max_prefix':
{'num_of_prefix': 4000,
'prefix_thld': 70,
'warn_only': False},
'static':
{'enabled': True}},
'router_id': '10.36.3.3',
'spf_control':
{'throttle':
{'lsa':
{'hold': 200,
'maximum': 5000,
'start': 50},
'spf':
{'hold': 200,
'maximum': 5000,
'start': 50}}},
'stub_router':
{'always':
{'always': False,
'external_lsa': False,
'include_stub': False,
'summary_lsa': False}}}}}}},
'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'adjacency_stagger':
{'disable': False,
'initial_number': 2,
'maximum_number': 64},
'areas':
{'0.0.0.0':
{'area_id': '0.0.0.0',
'area_type': 'normal',
'database':
{'lsa_types':
{1:
{'lsa_type': 1,
'lsas':
{'10.4.1.1 10.4.1.1':
{'adv_router': '10.4.1.1',
'lsa_id': '10.4.1.1',
'ospfv2':
{'body':
{'router':
{'links':
{'10.4.1.1':
{'link_data': '255.255.255.255',
'link_id': '10.4.1.1',
'topologies':
{0:
{'metric': 1,
'mt_id': 0}},
'type': 'stub network'},
'10.1.2.1':
{'link_data': '10.1.2.1',
'link_id': '10.1.2.1',
'topologies':
{0:
{'metric': 1,
'mt_id': 0}},
'type': 'transit network'},
'10.1.4.4':
{'link_data': '10.1.4.1',
'link_id': '10.1.4.4',
'topologies':
{0:
{'metric': 1,
'mt_id': 0}},
'type': 'transit network'}},
'num_of_links': 3}},
'header':
{'adv_router': '10.4.1.1',
'age': 1802,
'checksum': '0x6228',
'length': 60,
'lsa_id': '10.4.1.1',
'option': 'None',
'seq_num': '8000003d',
'type': 1}}}}},
2:
{'lsa_type': 2,
'lsas':
{'10.1.2.1 10.4.1.1':
{'adv_router': '10.4.1.1',
'lsa_id': '10.1.2.1',
'ospfv2':
{'body':
{'network':
{'attached_routers':
{'10.4.1.1': {},
'10.16.2.2': {}},
'network_mask': '255.255.255.0'}},
'header':
{'adv_router': '10.4.1.1',
'age': 1844,
'checksum': '0x3dd0',
'length': 32,
'lsa_id': '10.1.2.1',
'option': 'None',
'seq_num': '8000000f',
'type': 2}}}}},
3:
{'lsa_type': 3,
'lsas':
{'10.186.3.0 10.16.2.2':
{'adv_router': '10.16.2.2',
'lsa_id': '10.186.3.0',
'ospfv2':
{'body':
{'summary':
{'network_mask': '255.255.255.0',
'topologies':
{0:
{'metric': 65575,
'mt_id': 0}}}},
'header':
{'adv_router': '10.16.2.2',
'age': 520,
'checksum': '0xaa4a',
'length': 28,
'lsa_id': '10.186.3.0',
'option': 'None',
'seq_num': '80000001',
'type': 3}}}}},
5:
{'lsa_type': 5,
'lsas':
{'10.94.44.44 10.64.4.4':
{'adv_router': '10.64.4.4',
'lsa_id': '10.94.44.44',
'ospfv2':
{'body':
{'external':
{'network_mask': '255.255.255.255',
'topologies':
{0:
{'external_route_tag': 0,
'flags': 'E',
'forwarding_address': '0.0.0.0',
'metric': 20,
'mt_id': 0}}}},
'header':
{'adv_router': '10.64.4.4',
'age': 608,
'checksum': '0x7d61',
'length': 36,
'lsa_id': '10.94.44.44',
'option': 'None',
'seq_num': '80000002',
'type': 5}}}}},
10:
{'lsa_type': 10,
'lsas':
{'10.1.0.6 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.1.0.6',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0',
'link_id': '10.2.3.3',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.2.3.3': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.36.3.3',
'age': 1175,
'checksum': '0x5ec',
'length': 160,
'lsa_id': '10.1.0.6',
'opaque_id': 6,
'opaque_type': 1,
'option': 'None',
'seq_num': '80000002',
'type': 10}}}}}}},
'interfaces':
{'GigabitEthernet0/0/0/0':
{'bdr_ip_addr': '10.3.4.3',
'bdr_router_id': '10.36.3.3',
'bfd':
{'enable': False},
'cost': 1,
'dead_interval': 40,
'demand_circuit': False,
'dr_ip_addr': '10.3.4.4',
'dr_router_id': '10.64.4.4',
'enable': True,
'hello_interval': 10,
'hello_timer': '00:00:07:171',
'interface_type': 'broadcast',
'name': 'GigabitEthernet0/0/0/0',
'neighbors':
{'10.64.4.4':
{'neighbor_router_id': '10.64.4.4',
'address': '10.229.4.4',
'bdr_ip_addr': '10.229.4.2',
'dead_timer': '00:00:32',
'dr_ip_addr': '10.229.4.4',
'state': 'full',
'statistics':
{'nbr_event_count': 6,
'nbr_retrans_qlen': 0}}},
'passive': False,
'priority': 1,
'retransmit_interval': 5,
'state': 'bdr',
'transmit_delay': 1},
'GigabitEthernet0/0/0/2':
{'bdr_ip_addr': '10.2.3.2',
'bdr_router_id': '10.16.2.2',
'bfd': {'enable': False},
'cost': 1,
'dead_interval': 40,
'demand_circuit': False,
'dr_ip_addr': '10.2.3.3',
'dr_router_id': '10.36.3.3',
'enable': True,
'hello_interval': 10,
'hello_timer': '00:00:07:587',
'interface_type': 'broadcast',
'name': 'GigabitEthernet0/0/0/2',
'neighbors':
{'10.144.6.6':
{'neighbor_router_id': '10.144.6.6',
'address': '10.229.4.4',
'bdr_ip_addr': '10.229.4.2',
'dead_timer': '00:00:32',
'dr_ip_addr': '10.229.4.4',
'state': 'full',
'statistics':
{'nbr_event_count': 6,
'nbr_retrans_qlen': 0}}},
'passive': False,
'priority': 1,
'retransmit_interval': 5,
'state': 'dr',
'transmit_delay': 1},
'Loopback0':
{'bfd':
{'enable': False},
'cost': 1,
'demand_circuit': False,
'enable': True,
'interface_type': 'loopback',
'name': 'Loopback0'},
'tunnel-te31':
{'bfd':
{'enable': False},
'dead_interval': 40,
'demand_circuit': False,
'enable': True,
'hello_interval': 10,
'interface_type': 'point-to-point',
'name': 'tunnel-te31',
'passive': True,
'retransmit_interval': 5,
'state': 'point-to-point',
'transmit_delay': 1}},
'mpls':
{'te':
{'enable': True}},
'statistics':
{'area_scope_lsa_cksum_sum': '0x0a2fb5',
'area_scope_lsa_count': 19,
'spf_runs_count': 26}}},
'maximum_interfaces': 1024,
'mpls':
{'ldp':
{'ldp_igp_sync': True},
'te':
{'router_id': '10.36.3.3'}},
'nsr':
{'enable': True},
'preference':
{'multi_values':
{'external': 114,
'granularity':
{'detail':
{'inter_area': 113,
'intra_area': 112}}},
'single_value':
{'all': 110}},
'router_id': '10.36.3.3',
'spf_control':
{'throttle':
{'lsa':
{'hold': 200,
'maximum': 5000,
'start': 50},
'spf':
{'hold': 200,
'maximum': 5000,
'start': 50}}},
'stub_router':
{'always':
{'always': True,
'external_lsa': True,
'include_stub': True,
'summary_lsa': True},
'on_startup':
{'external_lsa': True,
'include_stub': True,
'on_startup': 5,
'summary_lsa': True},
'on_switchover':
{'external_lsa': True,
'include_stub': True,
'on_switchover': 10,
'summary_lsa': True}}}}}}}}}
OspfInfo_custom = {
'vrf':
{'VRF1':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'adjacency_stagger':
{'disable': False,
'initial_number': 2,
'maximum_number': 64},
'areas':
{'0.0.0.1':
{'area_id': '0.0.0.1',
'area_type': 'stub',
'database':
{'lsa_types':
{1:
{'lsa_type': 1,
'lsas':
{'10.229.11.11 10.229.11.11':
{'adv_router': '10.229.11.11',
'lsa_id': '10.229.11.11',
'ospfv2':
{'body':
{'router':
{'links':
{'10.186.5.1':
{'link_data': '10.186.5.1',
'link_id': '10.186.5.1',
'topologies':
{0:
{'metric': 1,
'mt_id': 0}},
'type': 'transit network'},
'10.151.22.22':
{'link_data': '0.0.0.14',
'link_id': '10.151.22.22',
'topologies':
{0:
{'metric': 111,
'mt_id': 0}},
'type': 'another router (point-to-point)'}},
'num_of_links': 2}},
'header':
{'adv_router': '10.229.11.11',
'age': 1713,
'checksum': '0x9ce3',
'length': 48,
'lsa_id': '10.229.11.11',
'option': 'None',
'seq_num': '8000003e',
'type': 1}}}}},
2:
{'lsa_type': 2,
'lsas':
{'10.186.5.1 10.229.11.11':
{'adv_router': '10.229.11.11',
'lsa_id': '10.186.5.1',
'ospfv2':
{'body':
{'network':
{'attached_routers':
{'10.229.11.11': {},
'10.115.55.55': {}},
'network_mask': '255.255.255.0'}},
'header':
{'adv_router': '10.229.11.11',
'age': 522,
'checksum': '0xddd9',
'length': 32,
'lsa_id': '10.186.5.1',
'option': 'None',
'seq_num': '80000033',
'type': 2}}}}},
3:
{'lsa_type': 3,
'lsas':
{'10.186.4.0 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.186.4.0',
'ospfv2':
{'body':
{'summary':
{'network_mask': '255.255.255.0',
'topologies':
{0:
{'metric': 75565,
'mt_id': 0}}}},
'header':
{'adv_router': '10.36.3.3',
'age': 608,
'checksum': '0xaa4a',
'length': 28,
'lsa_id': '10.186.4.0',
'option': 'None',
'seq_num': '90000001',
'type': 3}}}}},
5:
{'lsa_type': 5,
'lsas':
{'10.115.55.55 10.100.5.5':
{'adv_router': '10.100.5.5',
'lsa_id': '10.115.55.55',
'ospfv2':
{'body':
{'external':
{'network_mask': '255.255.255.255',
'topologies':
{0:
{'external_route_tag': 0,
'flags': 'E',
'forwarding_address': '0.0.0.0',
'metric': 20,
'mt_id': 0}}}},
'header':
{'adv_router': '10.100.5.5',
'age': 520,
'checksum': '0x7d61',
'length': 36,
'lsa_id': '10.115.55.55',
'option': 'None',
'seq_num': '90000006',
'type': 5}}}}},
10:
{'lsa_type': 10,
'lsas':
{'10.1.0.7 10.16.2.2':
{'adv_router': '10.16.2.2',
'lsa_id': '10.1.0.7',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'link_id': '10.3.2.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.3.2.2': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.16.2.2',
'age': 420,
'checksum': '0x5ec',
'length': 160,
'lsa_id': '10.1.0.7',
'opaque_id': 6,
'opaque_type': 1,
'option': 'None',
'seq_num': '90000002',
'type': 10}}}}}}},
'default_cost': 111,
'interfaces':
{'GigabitEthernet0/0/0/1':
{'bdr_ip_addr': '10.19.7.3',
'bdr_router_id': '10.36.3.3',
'bfd':
{'enable': True,
'interval': 12345,
'multiplier': 50},
'cost': 1,
'dead_interval': 40,
'demand_circuit': False,
'dr_ip_addr': '10.19.7.7',
'dr_router_id': '10.1.77.77',
'enable': True,
'hello_interval': 10,
'hello_timer': '00:00:03:040',
'interface_type': 'broadcast',
'name': 'GigabitEthernet0/0/0/1',
'neighbors':
{'10.36.3.3':
{'neighbor_router_id': '10.36.3.3',
'address': '10.229.3.3',
'bdr_ip_addr': '10.229.3.2',
'dead_timer': '00:00:31',
'dr_ip_addr': '10.229.3.3',
'state': 'full',
'statistics':
{'nbr_event_count': 6,
'nbr_retrans_qlen': 0}}},
'passive': False,
'priority': 1,
'retransmit_interval': 5,
'state': 'bdr',
'transmit_delay': 1}},
'mpls':
{'te':
{'enable': True}},
'ranges':
{'10.4.0.0/16':
{'advertise': True,
'prefix': '10.4.0.0/16'}},
'sham_links':
{'10.21.33.33 10.151.22.22':
{'cost': 111,
'dead_interval': 13,
'demand_circuit': True,
'hello_interval': 3,
'hello_timer': '00:00:00:772',
'local_id': '10.21.33.33',
'name': 'SL0',
'remote_id': '10.151.22.22',
'retransmit_interval': 5,
'state': 'point-to-point,',
'transit_area_id': '0.0.0.1',
'transmit_delay': 7}},
'statistics':
{'area_scope_lsa_cksum_sum': '0x04f437',
'area_scope_lsa_count': 11,
'spf_runs_count': 79},
'summary': True,
'virtual_links':
{'0.0.0.1 10.16.2.2':
{'authentication':
{'auth_trailer_key':
{'crypto_algorithm': 'simple'}},
'cost': 65535,
'dead_interval': 16,
'demand_circuit': True,
'hello_interval': 4,
'hello_timer': '00:00:03:179',
'name': 'VL0',
'neighbors':
{'10.16.2.2':
{'neighbor_router_id': '10.16.2.2',
'address': '10.229.4.4',
'bdr_ip_addr': '0.0.0.0',
'dead_timer': '00:00:21',
'dr_ip_addr': '0.0.0.0',
'state': 'full',
'statistics':
{'nbr_event_count': 7,
'nbr_retrans_qlen': 0}}},
'retransmit_interval': 44,
'router_id': '10.16.2.2',
'state': 'point-to-point,',
'transit_area_id': '0.0.0.1',
'transmit_delay': 5}}}},
'database_control':
{'max_lsa': 123},
'graceful_restart':
{'cisco':
{'enable': True,
'type': 'ietf'}},
'maximum_interfaces': 1024,
'mpls':
{'te':
{'router_id': '10.36.3.3'}},
'nsr':
{'enable': True},
'redistribution':
{'bgp':
{'bgp_id': 100,
'metric': 111},
'connected':
{'enabled': True,
'metric': 10},
'isis':
{'isis_pid': '10',
'metric': 3333},
'max_prefix':
{'num_of_prefix': 4000,
'prefix_thld': 70,
'warn_only': False},
'static':
{'enabled': True}},
'router_id': '10.36.3.3',
'spf_control':
{'throttle':
{'lsa':
{'hold': 200,
'maximum': 5000,
'start': 50},
'spf':
{'hold': 200,
'maximum': 5000,
'start': 50}}},
'stub_router':
{'always':
{'always': False,
'external_lsa': False,
'include_stub': False,
'summary_lsa': False}}}}}}},
}}
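# --- Hedged sketch (added for illustration, not part of the original outputs
# module): the expected-output dicts above share one nested layout, so they
# can be walked generically. For example, listing every neighbor in the
# 'full' state per VRF from the `OspfInfo` structure:
def _full_neighbors(info):
    """Return {vrf: [neighbor router-ids in 'full' state]} for an OspfInfo dict."""
    result = {}
    for vrf, vrf_data in info['vrf'].items():
        for af_data in vrf_data['address_family'].values():
            for inst_data in af_data['instance'].values():
                for area_data in inst_data.get('areas', {}).values():
                    for intf_data in area_data.get('interfaces', {}).values():
                        for nbr_id, nbr in intf_data.get('neighbors', {}).items():
                            if nbr.get('state') == 'full':
                                result.setdefault(vrf, []).append(nbr_id)
    return result
# _full_neighbors(OspfOutput.OspfInfo) ->
#     {'VRF1': ['10.36.3.3'], 'default': ['10.64.4.4', '10.144.6.6']}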
blob_id: 286e7d867b36f68a7658baceceb67f2289515319 | directory_id: 40195e6f86bf8620850f0c56e98eae5693e88277
path: /coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py | content_id: 48c207c55d81fa4de27eff38dcfb7cf447b13c7e
detected_licenses: ["MIT", "BSD-3-Clause"] | license_type: permissive | repo_name: apple/coremltools
snapshot_id: 009dfa7154d34cab8edcafa618e689e407521f50 | revision_id: feed174188f7773631a3d574e1ff9889a135c986 | branch_name: refs/heads/main
visit_date: 2023-09-01T23:26:13.491955 | revision_date: 2023-08-31T18:44:31 | committer_date: 2023-08-31T18:44:31
github_id: 95,862,535 | star_events_count: 3,742 | fork_events_count: 705
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-14T17:33:58 | gha_created_at: 2017-06-30T07:39:02 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,705 | extension: py
filename: conv1d_decomposition.py
# Copyright (c) 2023, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Block
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Operation
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil.passes.helper import block_context_manager
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
@register_pass(namespace="nn_backend")
class decompose_conv1d(AbstractGraphPass):
"""
NeuralNetwork does not support conv1d natively; it decomposes conv1d into
expand_dims -> conv2d -> squeeze. This pass performs that decomposition for
the NN backend ahead of time, so the resulting
expand_dims -> conv2d -> squeeze pattern gets a chance to be optimized.
Given:
%2 = conv(%1), %1.rank = 3
...
Result:
%3 = expand_dims(%1, axes=-2)
%4 = conv(%3)
%2 = squeeze(%4, axes=-2)
...
"""
def apply(self, prog):
for f in prog.functions.values():
self._decompose_conv1d_block(f)
@block_context_manager
def _decompose_conv1d_block(self, block: Block):
def help_decompose_conv1d_block(block: Block) -> bool:
for op in list(block.operations):
for b in op.blocks:
block_changed = True
while block_changed:
block_changed = help_decompose_conv1d_block(b)
# must be conv1d
if op.op_type != "conv" or op.x.rank != 3:
continue
if self._try_apply_transform(op, block):
# has to break as the downstream iterator is affected
return True
return False
block_changed = True
while block_changed:
block_changed = help_decompose_conv1d_block(block)
@staticmethod
def _try_apply_transform(conv_op: Operation, block: Block) -> bool:
# create `expand_dims`
expand_out = mb.expand_dims(x=conv_op.x, axes=(-2,), before_op=conv_op)
# prepare `conv2d`
conv_kwargs = {"x": expand_out, "before_op": conv_op}
# inherit `pad_type`, `groups`, `bias` from `conv1d`
conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val
conv_kwargs["groups"] = conv_op.inputs["groups"].val
bias = conv_op.inputs.get("bias", None)
if bias is not None:
conv_kwargs["bias"] = bias
# expand `weight`, `strides`, `pad`, `dilations` from `conv1d`
conv_kwargs["weight"] = mb.expand_dims(
x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op
)
conv_kwargs["strides"] = (1, conv_op.inputs["strides"].val[-1])
conv_kwargs["pad"] = (0, 0, conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1])
conv_kwargs["dilations"] = (1, conv_op.inputs["dilations"].val[-1])
# compose `conv2d`
conv_out = mb.conv(**conv_kwargs)
# create `squeeze`
squeeze_out = mb.squeeze(
x=conv_out, axes=(-2,), name=conv_op.outputs[0].name, before_op=conv_op
)
# try replacing `conv1d` output
# with the new `expand_dims` -> `conv2d` -> `squeeze` output
if conv_op.enclosing_block.try_replace_uses_of_var_after_op(
anchor_op=conv_op, old_var=conv_op.outputs[0], new_var=squeeze_out
):
# remove `conv1d`
block.remove_ops([conv_op])
return True
return False
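# A minimal usage sketch, kept in comments (assumptions: the MIL builder API
# shipped with this version of coremltools; the shapes, weight values, and the
# registry key "nn_backend::decompose_conv1d" are illustrative):
#
#   import numpy as np
#   from coremltools.converters.mil.mil import Builder as mb
#   from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
#
#   @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 8))])
#   def prog(x):
#       # rank-3 input with a rank-3 (C_out, C_in, K) weight makes this conv1d
#       return mb.conv(x=x, weight=np.random.rand(2, 4, 3))
#
#   PASS_REGISTRY["nn_backend::decompose_conv1d"](prog)
#   # the conv now operates on rank-4 tensors, wrapped by expand_dims/squeeze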
|
54fbdac373dd47fdbf4da4baba2e188fbe3baa73
|
9a43f4b0d532fe5fab90a6cbbf62b3dd6c65a4a6
|
/tests/integration/test_poller.py
|
01a26ddc8c9fbd590312122e8819138c0e2863b7
|
[
"Apache-2.0"
] |
permissive
|
saltstack/pepper
|
9cf0acc5ab3923c026ff612485bcfcc11e58b8e4
|
8ab18e8fb81c275e19c1345146116cf14b9f1223
|
refs/heads/develop
| 2023-08-07T03:10:32.344940
| 2023-05-03T12:53:12
| 2023-05-03T12:53:12
| 8,509,557
| 233
| 138
|
NOASSERTION
| 2023-05-04T20:10:40
| 2013-03-01T20:21:48
|
Python
|
UTF-8
|
Python
| false
| false
| 757
|
py
|
test_poller.py
|
# -*- coding: utf-8 -*-
def test_local_poll(pepper_cli, session_minion_id):
    '''Test the returns poller for LocalClient'''
ret = pepper_cli('--fail-if-incomplete', '*', 'test.sleep', '1')
assert ret[session_minion_id] is True
assert len(ret) == 1
def test_local_poll_long(pepper_cli, session_minion_id):
    '''Test the returns poller for LocalClient with a longer-running job'''
ret = pepper_cli('--fail-if-incomplete', '*', 'test.sleep', '30')
assert ret[session_minion_id] is True
assert len(ret) == 1
def test_local_poll_timeout(pepper_cli, session_minion_id):
    '''Test that the poller reports minions as failed when the timeout is exceeded'''
ret = pepper_cli('--timeout=5', '--fail-if-incomplete', '*', 'test.sleep', '30')
assert ret == {'Failed': [session_minion_id]}
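# For reference, each pepper_cli(...) call above corresponds to a CLI
# invocation like the following (a sketch; the targets and timings mirror the
# tests):
#
#   pepper --fail-if-incomplete '*' test.sleep 1
#   pepper --timeout=5 --fail-if-incomplete '*' test.sleep 30
#
# With --fail-if-incomplete, minions that have not returned when the timeout
# expires are reported under the 'Failed' key, as asserted above.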
|
7257bcd2f4f9f87886e162679e92000c4b6eb257
|
5a07e1afa5d172dcd4288f12636edd9c53148073
|
/tests/benchmarks/test_benchmark.py
|
35fbf0c1ece450fb6003a1066a03f041dc46e1e2
|
[
"Apache-2.0"
] |
permissive
|
scikit-hep/pyhf
|
3df3f9b12d1b362919629275b8746060833713f3
|
205eecfb0b57591eb6b70e98b01511797340a0c7
|
refs/heads/main
| 2023-09-02T18:50:35.990103
| 2023-08-31T00:10:41
| 2023-08-31T00:10:41
| 118,789,569
| 246
| 82
|
Apache-2.0
| 2023-09-13T21:57:02
| 2018-01-24T16:14:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,356
|
py
|
test_benchmark.py
|
import pyhf
from pyhf.simplemodels import uncorrelated_background
import numpy as np
import pytest
def generate_source_static(n_bins):
"""
Create the source structure for the given number of bins.
Args:
        n_bins (int): number of bins
Returns:
source
"""
binning = [n_bins, -0.5, n_bins + 0.5]
data = [120.0] * n_bins
bkg = [100.0] * n_bins
bkgerr = [10.0] * n_bins
sig = [30.0] * n_bins
source = {
'binning': binning,
'bindata': {'data': data, 'bkg': bkg, 'bkgerr': bkgerr, 'sig': sig},
}
return source
def generate_source_poisson(n_bins):
"""
Create the source structure for the given number of bins.
    Sample the bin contents from a Poisson distribution.
    Args:
        n_bins (int): number of bins
Returns:
source
"""
np.random.seed(0) # Fix seed for reproducibility
binning = [n_bins, -0.5, n_bins + 0.5]
data = np.random.poisson(120.0, n_bins).tolist()
bkg = np.random.poisson(100.0, n_bins).tolist()
bkgerr = np.random.poisson(10.0, n_bins).tolist()
sig = np.random.poisson(30.0, n_bins).tolist()
source = {
'binning': binning,
'bindata': {'data': data, 'bkg': bkg, 'bkgerr': bkgerr, 'sig': sig},
}
return source
def hypotest(pdf, data):
return pyhf.infer.hypotest(
1.0,
data,
pdf,
pdf.config.suggested_init(),
pdf.config.suggested_bounds(),
return_tail_probs=True,
return_expected=True,
return_expected_set=True,
)
bins = [1, 10, 50, 100, 200]
bin_ids = [f'{n_bins}_bins' for n_bins in bins]
@pytest.mark.parametrize('n_bins', bins, ids=bin_ids)
def test_hypotest(benchmark, backend, n_bins):
"""
Benchmark the performance of pyhf.utils.hypotest()
for various numbers of bins and different backends
Args:
benchmark: pytest benchmark
backend: `pyhf` tensorlib given by pytest parameterization
        n_bins (int): number of bins given by pytest parameterization
Returns:
None
"""
source = generate_source_static(n_bins)
pdf = uncorrelated_background(
source['bindata']['sig'], source['bindata']['bkg'], source['bindata']['bkgerr']
)
data = source['bindata']['data'] + pdf.config.auxdata
assert benchmark(hypotest, pdf, data)
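# To run these benchmarks directly (assuming pytest-benchmark and this repo's
# `backend` fixture are available in the environment):
#
#   pytest tests/benchmarks/test_benchmark.py --benchmark-sort=mean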
|
c063d4280d34710a7268541dfa38aa6351fe5a5c
|
a2721da4c1de8308966850d2f6dea1e334ace532
|
/docs/source/example.py
|
f83ea9c3d5d187736ff0d7aa8e764a73f3da1a2b
|
[
"Apache-2.0"
] |
permissive
|
cdent/gabbi
|
bb895b9943051d674894ea692bc679a4847524d0
|
8ee6cb7b53abdfbe6e5648f485124b80011a6f40
|
refs/heads/main
| 2023-08-31T13:25:21.926791
| 2023-08-26T12:06:48
| 2023-08-26T12:06:48
| 27,924,304
| 152
| 51
|
NOASSERTION
| 2023-08-26T11:31:59
| 2014-12-12T14:46:59
|
Python
|
UTF-8
|
Python
| false
| false
| 904
|
py
|
example.py
|
"""A sample test module."""
# For pathname munging
import os
# The module that build_tests comes from.
from gabbi import driver
# We need access to the WSGI application that hosts our service
from myapp import wsgiapp
# We're using fixtures in the YAML files, we need to know where to
# load them from.
from myapp.test import fixtures
# By convention the YAML files are put in a directory named
# "gabbits" that is in the same directory as the Python test file.
TESTS_DIR = 'gabbits'
def load_tests(loader, tests, pattern):
"""Provide a TestSuite to the discovery process."""
test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
# Pass "require_ssl=True" as an argument to force all tests
# to use SSL in requests.
return driver.build_tests(test_dir, loader,
intercept=wsgiapp.app,
fixture_module=fixtures)
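# For context, a minimal YAML test that could live under gabbits/ looks like
# this (an illustrative sketch, not taken verbatim from the gabbi docs):
#
#   tests:
#     - name: the root responds
#       GET: /
#       status: 200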
|
ceda9fd89e26e2c47dceee21524f1042011c4ebc
|
812d234c37a9291768018b20c54d45395c07ea97
|
/tavern/_core/pytest/util.py
|
7c0d94c1ec94b14d0c3baa07ec1884b94bfe8626
|
[
"MIT"
] |
permissive
|
taverntesting/tavern
|
060f0cb6b13b5df7129342dce1fa68f16033dc8b
|
9f2d2be9c1321f3f51744675f3b30cb6dda22549
|
refs/heads/master
| 2023-08-10T12:37:22.337961
| 2023-08-05T18:24:24
| 2023-08-05T18:24:24
| 109,144,395
| 984
| 206
|
MIT
| 2023-08-05T17:59:21
| 2017-11-01T14:51:25
|
Python
|
UTF-8
|
Python
| false
| false
| 5,941
|
py
|
util.py
|
import logging
from functools import lru_cache
from typing import Any, Dict
import pytest
from tavern._core.dict_util import format_keys, get_tavern_box
from tavern._core.general import load_global_config
from tavern._core.pytest.config import TavernInternalConfig, TestConfig
from tavern._core.strict_util import StrictLevel
logger = logging.getLogger(__name__)
def add_parser_options(parser_addoption, with_defaults: bool = True) -> None:
"""Add argparse options
This is shared between the CLI and pytest (for now)
See also _core.pytesthook.hooks.pytest_addoption
"""
parser_addoption(
"--tavern-global-cfg",
help="One or more global configuration files to include in every test",
nargs="+",
)
parser_addoption(
"--tavern-http-backend",
help="Which http backend to use",
default="requests" if with_defaults else None,
)
parser_addoption(
"--tavern-mqtt-backend",
help="Which mqtt backend to use",
default="paho-mqtt" if with_defaults else None,
)
parser_addoption(
"--tavern-strict",
help="Default response matching strictness",
default=None,
nargs="+",
)
parser_addoption(
"--tavern-use-default-traceback",
help="Use normal python-style traceback",
default=False,
action="store_true",
)
parser_addoption(
"--tavern-always-follow-redirects",
help="Always follow HTTP redirects",
default=False,
action="store_true",
)
parser_addoption(
"--tavern-file-path-regex",
help="Regex to search for Tavern YAML test files",
default=r".+\.tavern\.ya?ml$",
action="store",
nargs=1,
)
def add_ini_options(parser: pytest.Parser) -> None:
"""Add an option to pass in a global config file for tavern
    See also _core.pytest.util.add_parser_options
"""
parser.addini(
"tavern-global-cfg",
help="One or more global configuration files to include in every test",
type="linelist",
default=[],
)
parser.addini(
"tavern-http-backend", help="Which http backend to use", default="requests"
)
parser.addini(
"tavern-mqtt-backend", help="Which mqtt backend to use", default="paho-mqtt"
)
parser.addini(
"tavern-strict",
help="Default response matching strictness",
type="args",
default=None,
)
parser.addini(
"tavern-use-default-traceback",
help="Use normal python-style traceback",
type="bool",
default=False,
)
parser.addini(
"tavern-always-follow-redirects",
help="Always follow HTTP redirects",
type="bool",
default=False,
)
parser.addini(
"tavern-file-path-regex",
help="Regex to search for Tavern YAML test files",
default=r".+\.tavern\.ya?ml$",
type="args",
)
def load_global_cfg(pytest_config: pytest.Config) -> TestConfig:
return _load_global_cfg(pytest_config).with_new_variables()
@lru_cache()
def _load_global_cfg(pytest_config: pytest.Config) -> TestConfig:
"""Load globally included config files from cmdline/cfg file arguments
Args:
pytest_config: Pytest config object
Returns:
variables/stages/etc from global config files
Raises:
exceptions.UnexpectedKeysError: Invalid settings in one or more config
files detected
"""
# Load ini first
ini_global_cfg_paths = pytest_config.getini("tavern-global-cfg") or []
# THEN load command line, to allow overwriting of values
cmdline_global_cfg_paths = pytest_config.getoption("tavern_global_cfg") or []
all_paths = ini_global_cfg_paths + cmdline_global_cfg_paths
global_cfg_dict = load_global_config(all_paths)
try:
loaded_variables = global_cfg_dict["variables"]
except KeyError:
logger.debug("Nothing to format in global config files")
variables = {}
else:
tavern_box = get_tavern_box()
variables = format_keys(loaded_variables, tavern_box)
global_cfg = TestConfig(
variables=variables,
strict=_load_global_strictness(pytest_config),
follow_redirects=_load_global_follow_redirects(pytest_config),
tavern_internal=TavernInternalConfig(
pytest_hook_caller=pytest_config.hook,
backends=_load_global_backends(pytest_config),
),
stages=global_cfg_dict.get("stages", []),
)
return global_cfg
def _load_global_backends(pytest_config: pytest.Config) -> Dict[str, Any]:
"""Load which backend should be used"""
backend_settings = {}
backends = ["http", "mqtt"]
for b in backends:
backend_settings[b] = get_option_generic(
pytest_config, "tavern-{}-backend".format(b), None
)
return backend_settings
def _load_global_strictness(pytest_config: pytest.Config) -> StrictLevel:
"""Load the global 'strictness' setting"""
options = get_option_generic(pytest_config, "tavern-strict", [])
return StrictLevel.from_options(options)
def _load_global_follow_redirects(pytest_config: pytest.Config):
"""Load the global 'follow redirects' setting"""
return get_option_generic(pytest_config, "tavern-always-follow-redirects", False)
def get_option_generic(pytest_config: pytest.Config, flag: str, default):
"""Get a configuration option or return the default
Priority order is cmdline, then ini, then default"""
cli_flag = flag.replace("-", "_")
ini_flag = flag
# Lowest priority
use = default
# Middle priority
if pytest_config.getini(ini_flag) is not None:
use = pytest_config.getini(ini_flag)
# Top priority
if pytest_config.getoption(cli_flag) is not None:
use = pytest_config.getoption(cli_flag)
return use
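# Precedence example for get_option_generic (illustrative values; "httpx" is a
# hypothetical backend name, not one tavern ships with):
#
#   tavern-http-backend = requests       # in the ini file
#   --tavern-http-backend=httpx          # on the command line
#
# get_option_generic(pytest_config, "tavern-http-backend", None) returns
# "httpx": the command line overrides the ini value, which overrides the
# default.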
|
7b220f956a9687a64510b5837980ff47a2782095
|
98d9a43778d2d0eebe3820ab3dfd0ceb488e204d
|
/core/modules/dockerscan/actions/scan/cli.py
|
50c06dd9e59003a8b30926d6b362d8cbf41b9302
|
[
"MIT"
] |
permissive
|
jm33-m0/emp3r0r
|
f52c3ad75e23a250ff964bf6455677980e3ea281
|
a2f9dfb3fa489493fbf00e23013a2cda0259b82d
|
refs/heads/master
| 2023-08-31T04:32:10.688717
| 2023-08-10T17:04:36
| 2023-08-10T17:04:36
| 236,146,680
| 1,097
| 215
|
MIT
| 2023-09-07T06:29:45
| 2020-01-25T08:55:08
|
Go
|
UTF-8
|
Python
| false
| false
| 650
|
py
|
cli.py
|
import click
from .model import *
from .console import *
from ..helpers import check_console_input_config
@click.command(help="Search for Open Docker Registries")
@click.pass_context
@click.argument("target")
@click.option("--timeout", "-t", "timeout", help="timeout for each port-check")
@click.option("--ports", "-p", "ports",
help="ports to test. i.e: 80,443,8000-8080")
@click.option("-c", "concurrency", help="Maximum concurrency scans")
def scan(ctx, **kwargs):
config = DockerScanModel(**ctx.obj, **kwargs)
# Check if valid
if check_console_input_config(config):
launch_dockerscan_scan_in_console(config)
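# Illustrative invocation, assuming this command is mounted on a `dockerscan`
# click group (the target network and option values below are examples):
#
#   dockerscan scan 192.168.1.0/24 -p 5000,5001 -t 5 -c 10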
|
49f37018b08eac8f38049d7bd7eb97cd0f9f6193
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/numpy_vstack.py
|
c6072ce202cebd915fd5d5f5ad68bfc688cf5df2
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293
| 2023-07-26T13:21:11
| 2023-07-26T13:21:11
| 98,900,570
| 253
| 77
|
MIT
| 2020-10-25T01:12:53
| 2017-07-31T14:54:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
numpy_vstack.py
|
import numpy as np
a1 = np.ones((2, 3), int)
print(a1)
# [[1 1 1]
# [1 1 1]]
a2 = np.full((2, 3), 2)
print(a2)
# [[2 2 2]
# [2 2 2]]
print(np.vstack([a1, a2]))
# [[1 1 1]
# [1 1 1]
# [2 2 2]
# [2 2 2]]
print(np.concatenate([a1, a2], 0))
# [[1 1 1]
# [1 1 1]
# [2 2 2]
# [2 2 2]]
a1 = np.ones(3, int)
print(a1)
# [1 1 1]
a2 = np.full(3, 2)
print(a2)
# [2 2 2]
print(np.vstack([a1, a2]))
# [[1 1 1]
# [2 2 2]]
print(np.stack([a1, a2], 0))
# [[1 1 1]
# [2 2 2]]
a1 = np.ones((2, 3), int)
print(a1)
# [[1 1 1]
# [1 1 1]]
a2 = np.full(3, 2)
print(a2)
# [2 2 2]
print(np.vstack([a1, a2]))
# [[1 1 1]
# [1 1 1]
# [2 2 2]]
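# Added note: vstack requires matching trailing dimensions. A 1-D array is
# treated as a single row, so its length must equal the column count;
# otherwise a ValueError is raised:
# np.vstack([np.ones((2, 3), int), np.full(2, 2)])
# -> ValueError (row of length 2 vs. 3 columns)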
|
e763a27be9368e46d88a49f61e8e1710aa6a73c5
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/android/instant_start/benchmark.py
|
4d6fe567e2eda020aa2210d39c9e8bf7d73f7014
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 7,362
|
py
|
benchmark.py
|
#!/usr/bin/env vpython3
#
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run benchmark for Instant start."""
from __future__ import print_function
import argparse
from datetime import datetime
import logging
import os
import pickle
import random
import re
import subprocess
import sys
import time
import stats.analyze
def get_timestamp(adb_log_line):
"""Parse the timestamp in the adb log"""
# adb log doesn't have the year field printed out.
parsed = datetime.strptime(adb_log_line[0:18], '%m-%d %H:%M:%S.%f')
return parsed.replace(year=datetime.now().year)
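# Example with a hypothetical log line: get_timestamp('08-23 14:05:01.123 ...')
# parses the leading 'MM-DD HH:MM:SS.mmm' and stamps it with the current year.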
def keep_awake():
"""Keep the device awake. This works for non-rooted devices as well."""
os.system("adb shell svc power stayon true")
os.system("adb shell input keyevent mouse")
def get_model():
"""Get the device model."""
return subprocess.check_output(
['adb', 'shell', 'getprop', 'ro.product.model']).rstrip()
def run_apk(variant, dry_run=False, reinstall=False, check_state=False):
"""Run Chrome and return metrics"""
keep_awake()
variant_name, apk_script, extra_cmd = variant
logging.warning('Running variant "%s"', variant_name)
assert os.path.exists(apk_script), "Script '%s' doesn't exist" % apk_script
features = '--enable-features=' + ','.join([
'TabGroupsAndroid<Study', 'TabSwitcherOnReturn<Study',
'StartSurfaceAndroid<Study', 'InstantStart<Study'
])
args = '--args=' + ' '.join([
'--disable-fre', '--disable-field-trial-config', features,
'--force-fieldtrials=Study/Group',
'--force-fieldtrial-params=Study.Group:'
'start_surface_return_time_seconds/0'
'/show_last_active_tab_only/true'
'/open_ntp_instead_of_start/true'
] + extra_cmd)
if reinstall:
logging.warning('Uninstalling')
cmd = [apk_script, 'uninstall']
logging.info('Running %s', cmd)
logging.info(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
# Use "unbuffer" to force flushing the output of |apk_script|.
cmd = ['unbuffer', apk_script, 'run', '-vvv', args]
logging.info('Running %s', cmd)
# Use unbuffered pipe to avoid blocking.
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=0)
latencies = []
events_re = re.compile(
r"Startup.Android.(?P<name>[0-9a-zA-Z_]+)[^ ]* = (?P<value>[0-9.]+)")
    # Read with readline() to avoid buffered iteration:
    # "for line in proc.stdout" might block.
# See https://stackoverflow.com/questions/1183643/
for line in iter(proc.stdout.readline, b''):
if isinstance(line, bytes):
line = line.decode(encoding='utf8')
logging.debug(line.rstrip())
if ('ActivityTaskManager' in line
or 'ActivityManager' in line) and 'START' in line:
start_timestamp = get_timestamp(line)
logging.info('Chrome started at %s', start_timestamp)
if dry_run:
time.sleep(5)
if check_state:
logging.warning('Make sure there is at least one tab, '
'and the Feed is loaded. '
'Press Enter to continue.')
sys.stdin.readline()
break
groups = events_re.search(line)
if groups:
latency = {}
latency['variant_name'] = variant_name
latency['metric_name'] = groups.group('name')
latency['value'] = groups.group('value')
latencies.append(latency)
logging.info(line.rstrip())
logging.info('Got %s = %s', groups.group('name'),
groups.group('value'))
if len(latencies) >= 5:
break
proc.kill()
return latencies
def main():
"""Entry point of the benchmark script"""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--control-apk',
default='out/Release/bin/monochrome_apk',
help='The APK script file for control behavior.')
parser.add_argument('--experiment-apk',
default='out/Release/bin/monochrome_apk',
help='The APK script file for experiment behavior.')
parser.add_argument('--reinstall',
action='store_true',
help='Uninstall before installing the APKs.')
parser.add_argument('--repeat',
type=int,
default=3,
help='How many times to repeat running.')
parser.add_argument('--data-output',
default='runs.pickle',
help='The output file for benchmark data.')
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help='Be more verbose.')
args, _ = parser.parse_known_args()
level = logging.WARNING
if args.verbose == 1:
level = logging.INFO
elif args.verbose >= 2:
level = logging.DEBUG
logging.basicConfig(level=level,
format='%(asctime)-2s %(levelname)-8s %(message)s')
logging.addLevelName(
logging.WARNING,
"\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(
logging.ERROR,
"\033[1;41m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
try:
subprocess.check_output('which unbuffer', shell=True)
except subprocess.CalledProcessError:
sys.exit('ERROR: "unbuffer" not found. ' +
'Install by running "sudo apt install expect".')
logging.warning('Make sure the device screen is unlocked. '
'Otherwise the benchmark might get stuck.')
# List control/experiment APKs for side-by-side comparison.
variants = []
variants.append(('control', args.control_apk, []))
variants.append(('experiment', args.experiment_apk, []))
metadata = {'model': get_model(), 'start_time': datetime.now()}
logging.warning('Pre-run for flag caching.')
for variant in variants:
run_apk(variant, dry_run=True, reinstall=args.reinstall)
logging.warning('Dry-run for manual state checking.')
for variant in variants:
run_apk(variant, dry_run=True, check_state=True)
runs = []
for i in range(args.repeat):
logging.warning('Run %d/%d', i + 1, args.repeat)
random.shuffle(variants)
for variant in variants:
result = run_apk(variant)
logging.info('Results: %s', result)
runs.extend(result)
time.sleep(10) # try to avoid overloading the device.
with open(args.data_output, 'wb') as pickle_file:
pickle.dump(metadata, pickle_file)
pickle.dump(runs, pickle_file)
logging.info('Saved "%s"', args.data_output)
stats.analyze.print_report(runs, metadata['model'])
if __name__ == '__main__':
sys.exit(main())
|
708b4abf46e453455f46fae4a74935a6880d2ec5
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/CloudIncidentResponse/Scripts/ExtractIndicatorsCloudLogging/ExtractIndicatorsCloudLogging_test.py
|
75b52c53dd0fab25bd916c29da452a500535b25e
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,528
|
py
|
ExtractIndicatorsCloudLogging_test.py
|
from ExtractIndicatorsCloudLogging import extract_aws_info, extract_gcp_info, extract_event_info
def test_extract_event_info():
aws_event = {
"userIdentity": {
"arn": "arn:aws:sts::1111111111:assumed-role/test-1111111034205526016/test",
"accessKeyId": "1111111111",
"instanceId": "i-1234567890",
"sessionContext": {
"sessionIssuer": {
"userName": "test-user"
}
}
},
"sourceIPAddress": "1.1.1.1",
"eventName": "CreateSnapshot",
"eventSource": "ec2.amazonaws.com",
"userAgent": "aws-sdk-go/1.43.16 (go1.20.4 X:boringcrypto; linux; amd64)",
}
# Test AWS event
event_type, event_info = extract_event_info(aws_event)
assert event_type == "AWS"
    assert len(event_info) == 7  # the seven AWS fields extracted by extract_aws_info
gcp_event = {
"logName": "projects/gcp-integrations/logs/cloudaudit.googleapis.com%2Factivity",
"protoPayload": {
"resourceName": "coordination.test.io/v1/namespaces/test-test/leases/test-test",
"requestMetadata": {
"callerIp": "10.128.0.6",
"callerSuppliedUserAgent": "test-test/v1.23.17 (linux/amd64) kubernetes/f26d814/test-test"
},
"authenticationInfo": {
"principalEmail": "system:test-test"
},
"methodName": "io.k8s.coordination.v1.leases.update"
}
}
# Test GCP event
event_type, event_info = extract_event_info(gcp_event)
assert event_type == "GCP"
    assert len(event_info) == 5  # the five GCP fields extracted by extract_gcp_info
def test_extract_aws_info():
event = {
"userIdentity": {
"arn": "arn:aws:sts::1111111111:assumed-role/test-1111111034205526016/test",
"accessKeyId": "1111111111",
"instanceId": "i-1234567890",
"sessionContext": {
"sessionIssuer": {
"userName": "test-user"
}
}
},
"sourceIPAddress": "1.1.1.1",
"eventName": "CreateSnapshot",
"userAgent": "aws-sdk-go/1.43.16 (go1.20.4 X:boringcrypto; linux; amd64)",
}
result = extract_aws_info(event)
expected_result = (
"arn:aws:sts::1111111111:assumed-role/test-1111111034205526016/test",
"1111111111",
"i-1234567890",
"1.1.1.1",
"test-user",
"CreateSnapshot",
"aws-sdk-go/1.43.16 (go1.20.4 X:boringcrypto; linux; amd64)"
)
assert result == expected_result
def test_extract_gcp_info():
event = {
"protoPayload": {
"resourceName": "coordination.test.io/v1/namespaces/test-test/leases/test-test",
"requestMetadata": {
"callerIp": "10.128.0.6",
"callerSuppliedUserAgent": "test-test/v1.23.17 (linux/amd64) kubernetes/f26d814/test-test"
},
"authenticationInfo": {
"principalEmail": "system:test-test"
},
"methodName": "io.k8s.coordination.v1.leases.update"
}
}
result = extract_gcp_info(event)
expected_result = (
"coordination.test.io/v1/namespaces/test-test/leases/test-test",
"10.128.0.6",
"system:test-test",
"io.k8s.coordination.v1.leases.update",
"test-test/v1.23.17 (linux/amd64) kubernetes/f26d814/test-test"
)
assert result == expected_result
|
ca806d2d0fa75b27306cb721d915254949aa2cd7
|
6d40c76564adacb9c481e87582c239f9b708f592
|
/benchmarking/harness.py
|
1be054d0686d071eff769584c3531a272fb9d466
|
[
"Apache-2.0"
] |
permissive
|
facebook/FAI-PEP
|
40c9636fcc5f6416783384667449b76d7354d156
|
75ffd8ba91da66987904603a2a13c259c59840c9
|
refs/heads/main
| 2023-08-31T07:48:53.781883
| 2023-08-29T18:41:53
| 2023-08-29T18:41:53
| 111,588,048
| 390
| 93
|
Apache-2.0
| 2023-06-12T23:37:55
| 2017-11-21T18:47:29
|
Python
|
UTF-8
|
Python
| false
| false
| 12,653
|
py
|
harness.py
|
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import copy
import json
import os
import shutil
import sys
import tempfile
import threading
import time
from benchmarks.benchmarks import BenchmarkCollector
from driver.benchmark_driver import runOneBenchmark
from frameworks.frameworks import getFrameworks
from platforms.platforms import getPlatforms
from reporters.reporters import getReporters
from utils.custom_logger import getLogger
from utils.utilities import (
getRunKilled,
getRunStatus,
getRunTimeout,
KILLED_FLAG as RUN_KILLED,
parse_kwarg,
setRunStatus,
TIMEOUT_FLAG as RUN_TIMEOUT,
)
parser = argparse.ArgumentParser()
parser.add_argument(
"--android_dir",
default="/data/local/tmp/",
help="The directory in the android device all files are pushed to.",
)
# for backward compatibility
parser.add_argument("--backend", help="Specify the backend the test runs on.")
parser.add_argument(
"-b",
"--benchmark_file",
required=True,
help="Specify the json file for the benchmark or a number of benchmarks",
)
parser.add_argument(
"--command_args",
help="Specify optional command arguments that would go with the "
"main benchmark command",
)
parser.add_argument(
"--cooldown",
default=1.0,
type=float,
help="Specify the time interval in seconds between two test runs.",
)
parser.add_argument(
"--debug",
action="store_true",
help="Debug mode to retain all the running binaries and models.",
)
parser.add_argument("--device", help="The single device to run this benchmark on")
parser.add_argument(
"-d",
"--devices",
help="Specify the devices to run the benchmark, in a comma separated "
"list. The value is the device or device_hash field of the meta info.",
)
parser.add_argument(
"--env",
help="environment variables passed to runtime binary.",
nargs="*",
type=parse_kwarg,
default=[],
)
parser.add_argument(
"--excluded_devices",
help="Specify the devices that skip the benchmark, in a comma separated "
"list. The value is the device or device_hash field of the meta info.",
)
parser.add_argument(
"--framework",
required=True,
default="pytorch",
help="Specify the framework to benchmark on.",
)
parser.add_argument(
"--info",
required=True,
help="The json serialized options describing the control and treatment.",
)
parser.add_argument(
"--ios_dir",
default="/tmp",
help="The directory in the ios device all files are pushed to.",
)
parser.add_argument(
"--local_reporter",
help="Save the result to a directory specified by this argument.",
)
parser.add_argument(
"--monsoon_map", help="Map the phone hash to the monsoon serial number."
)
parser.add_argument(
"--simple_local_reporter",
help="Same as local reporter, but the directory hierarchy is reduced.",
)
parser.add_argument(
"--model_cache",
required=True,
help="The local directory containing the cached models. It should not "
"be part of a git directory.",
)
parser.add_argument(
"-p",
"--platform",
required=True,
help="Specify the platform to benchmark on. Use this flag if the framework"
" needs special compilation scripts. The scripts are called build.sh "
"saved in "
+ os.path.join("specifications", "frameworks", "<framework>", "<platform>")
+ " directory",
)
parser.add_argument("--platform_sig", help="Specify the platform signature")
parser.add_argument("--program", help="The program to run on the platform.")
parser.add_argument(
"--reboot",
action="store_true",
help="Tries to reboot the devices before launching benchmarks for one " "commit.",
)
parser.add_argument(
"--regressed_types",
help="A json string that encodes the types of the regressed tests.",
)
parser.add_argument(
"--remote_reporter",
help="Save the result to a remote server. "
"The style is <domain_name>/<endpoint>|<category>",
)
parser.add_argument(
"--remote_access_token", help="The access token to access the remote server"
)
parser.add_argument(
"--root_model_dir",
help="The root model directory if the meta data of the model uses "
"relative directory, i.e. the location field starts with //",
)
parser.add_argument(
"--run_type",
default="benchmark",
choices=["benchmark", "verify", "regress"],
help="The type of the current run. The allowed values are: "
"benchmark, the normal benchmark run."
"verify, the benchmark is re-run to confirm a suspicious regression."
"regress, the regression is confirmed.",
)
parser.add_argument(
"--screen_reporter",
action="store_true",
help="Display the summary of the benchmark result on screen.",
)
parser.add_argument(
"--simple_screen_reporter",
action="store_true",
help="Display the result on screen with no post processing.",
)
parser.add_argument(
"--set_freq",
help="On rooted android phones, set the frequency of the cores. "
"The supported values are: "
"max: set all cores to the maximum frquency. "
"min: set all cores to the minimum frequency. "
"mid: set all cores to the median frequency. ",
)
parser.add_argument(
"--shared_libs",
help="Pass the shared libs that the framework depends on, "
"in a comma separated list.",
)
parser.add_argument(
"--string_map",
help="A json string mapping tokens to replacement strings. "
"The tokens, surrended by {}, when appearing in the test fields of "
"the json file, are to be replaced with the mapped values.",
)
parser.add_argument(
"--timeout",
default=300,
type=float,
help="Specify a timeout running the test on the platforms. "
"The timeout value needs to be large enough so that the low end devices "
"can safely finish the execution in normal conditions. ",
)
parser.add_argument(
"--user_identifier",
help="User can specify an identifier and that will be passed to the "
"output so that the result can be easily identified.",
)
# for backward compatibility
parser.add_argument(
"--wipe_cache",
default=False,
help="Specify whether to evict cache or not before running",
)
parser.add_argument(
"--hash_platform_mapping",
help="Specify the devices hash platform mapping json file.",
)
parser.add_argument(
"--device_name_mapping",
default=None,
help="Specify device to product name mapping json file.",
)
# Avoid the prefix user so that it doesn't collide with --user_identifier
parser.add_argument(
"--user_string",
help="Specify the user running the test (to be passed to the remote reporter).",
)
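# A typical invocation wiring the required flags together (the paths, platform,
# and info payload below are illustrative):
#
#   python harness.py -b specifications/benchmark.json --framework pytorch \
#       -p android --model_cache /tmp/aibench_models \
#       --info '{"treatment": {"commit": "HEAD"}}'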
class BenchmarkDriver:
def __init__(self, **kwargs):
setRunStatus(0, overwrite=True)
self.status = 0
raw_args = kwargs.get("raw_args", None)
self.usb_controller = kwargs.get("usb_controller")
self.args, self.unknowns = parser.parse_known_args(raw_args)
self._lock = threading.Lock()
def runBenchmark(self, info, platform, benchmarks):
if self.args.reboot:
platform.rebootDevice()
for idx in range(len(benchmarks)):
tempdir = tempfile.mkdtemp(
prefix="_".join(["aibench", str(self.args.user_identifier), ""])
)
# we need to get a different framework instance per thread
# will consolidate later. For now create a new framework
frameworks = getFrameworks()
framework = frameworks[self.args.framework](tempdir, self.args)
reporters = getReporters(self.args)
benchmark = benchmarks[idx]
# check the framework matches
if "model" in benchmark and "framework" in benchmark["model"]:
assert benchmark["model"]["framework"] == self.args.framework, (
"Framework specified in the json file "
"{} ".format(benchmark["model"]["framework"])
+ "does not match the command line argument "
"{}".format(self.args.framework)
)
if self.args.debug:
for test in benchmark["tests"]:
test["log_output"] = True
if self.args.env:
for test in benchmark["tests"]:
cmd_env = dict(self.args.env)
if "env" in test:
cmd_env.update(test["env"])
test["env"] = cmd_env
b = copy.deepcopy(benchmark)
i = copy.deepcopy(info)
status = runOneBenchmark(
i,
b,
framework,
platform,
self.args.platform,
reporters,
self._lock,
self.args.cooldown,
self.args.user_identifier,
self.args.local_reporter,
)
self.status = self.status | status
if idx != len(benchmarks) - 1:
# cool down period between multiple benchmark runs
cooldown = self.args.cooldown
if "model" in benchmark and "cooldown" in benchmark["model"]:
cooldown = float(benchmark["model"]["cooldown"])
time.sleep(cooldown)
if not self.args.debug:
shutil.rmtree(tempdir, True)
for test in benchmark["tests"]:
if "preprocess" in test and "files" in test["preprocess"]:
for f in test["preprocess"]["files"].values():
shutil.rmtree(f["location"], True)
def run(self):
tempdir = tempfile.mkdtemp(
prefix="_".join(["aibench", str(self.args.user_identifier), ""])
)
getLogger().info("Temp directory: {}".format(tempdir))
info = self._getInfo()
frameworks = getFrameworks()
assert (
self.args.framework in frameworks
), "Framework {} is not supported".format(self.args.framework)
framework = frameworks[self.args.framework](tempdir, self.args)
bcollector = BenchmarkCollector(
framework, self.args.model_cache, args=self.args
)
benchmarks = bcollector.collectBenchmarks(
info, self.args.benchmark_file, self.args.user_identifier
)
platforms = getPlatforms(self.args, tempdir, self.usb_controller)
threads = []
for platform in platforms:
t = threading.Thread(
target=self.runBenchmark, args=(info, platform, benchmarks)
)
t.start()
threads.append(t)
for t in threads:
t.join()
if not self.args.debug:
shutil.rmtree(tempdir, True)
status = self.status | getRunStatus()
if getRunKilled():
status_str = "killed"
elif getRunTimeout():
status_str = "timeout"
elif status == 0:
status_str = "success"
elif status == 1:
status_str = "user error"
elif status == 2:
status_str = "harness error"
else:
status_str = "user and harness error"
getLogger().info(" ======= {} =======".format(status_str))
if getRunKilled():
return RUN_KILLED
if getRunTimeout():
return RUN_TIMEOUT
return status
def _getInfo(self):
info = json.loads(self.args.info)
info["run_type"] = "benchmark"
if "meta" not in info:
info["meta"] = {}
info["meta"]["command_args"] = (
self.args.command_args if self.args.command_args else ""
)
        # for backward compatibility
if self.args.backend:
info["meta"]["command_args"] += " --backend {}".format(self.args.backend)
if self.args.wipe_cache:
info["meta"]["command_args"] += " --wipe_cache {}".format(
self.args.wipe_cache
)
if self.args.user_string:
info["user"] = self.args.user_string
return info
if __name__ == "__main__":
app = BenchmarkDriver()
status = app.run()
sys.exit(status)
|
0c650f0c01cd0d9e01c16078280950ab2bb7d6dc
|
38c290c804501eff492f1fa7ee8abb00b46c70ef
|
/runtime/space/operators.py
|
5a3eb2d583e6d110f3bbd42a15c5d2445175d00a
|
[
"MIT"
] |
permissive
|
cheery/lever
|
8a9524387bf3bc511889fa9a48f89927fd0b78f2
|
6fa8cd6afec440b32232f87236b0457fb8bfb8b1
|
refs/heads/master
| 2021-01-25T22:09:13.829448
| 2018-05-14T00:09:17
| 2018-05-14T00:09:17
| 45,874,533
| 144
| 13
| null | 2017-10-27T03:04:05
| 2015-11-09T23:40:43
|
Python
|
UTF-8
|
Python
| false
| false
| 14,157
|
py
|
operators.py
|
# We may want to invert many of these dependencies.
from builtin import Builtin, signature
from interface import Object, Interface, null, cast
from customobject import Id
from multimethod import Multimethod
from numbers import Float, Integer, Boolean, to_float, to_int, true, false, is_true, is_false, boolean
from rpython.rlib.rarithmetic import LONG_BIT, ovfcheck
from rpython.rlib.objectmodel import specialize, always_inline
from rpython.rlib.rfloat import copysign
from rpython.rtyper.lltypesystem import rffi
from rpython.rtyper.lltypesystem.module.ll_math import math_fmod
from string import String
from listobject import List
from setobject import Set
from slices import Slice
from uint8array import Uint8Array, Uint8Slice, Uint8Data, alloc_uint8array
import setobject
import space
import math
clamp = Multimethod(3)
coerce = Multimethod(2)
concat = Multimethod(2)
neg = Multimethod(1)
pos = Multimethod(1)
cmp_ = Multimethod(2)
ne = Multimethod(2)
eq = Multimethod(2)
lt = Multimethod(2)
le = Multimethod(2)
gt = Multimethod(2)
ge = Multimethod(2)
by_symbol = {
u'clamp': clamp,
u'coerce': coerce,
u'cmp': cmp_,
u'++': concat,
u'!=': ne,
u'==': eq,
u'<': lt,
u'>': gt,
u'<=': le,
u'>=': ge,
u'-expr': neg,
u'+expr': pos,
}
def coerce_by_default(method):
def default(argv):
args = coerce.call(argv)
if not isinstance(args, List):
raise space.unwind(space.LError(u"coerce should return list"))
return method.call_suppressed(args.contents)
method.default = Builtin(default)
def arithmetic_multimethod(sym, operation, flo=False):
operation = specialize.argtype(0, 1)(operation)
method = Multimethod(2)
coerce_by_default(method)
@method.multimethod_s(Integer, Integer)
def _(a, b):
return Integer(operation(a.value, b.value))
if flo:
@method.multimethod_s(Float, Float)
def _(a, b):
return Float(operation(a.number, b.number))
by_symbol[sym] = method
return method
add = arithmetic_multimethod(u'+', (lambda a, b: a + b), flo=True)
sub = arithmetic_multimethod(u'-', (lambda a, b: a - b), flo=True)
mul = arithmetic_multimethod(u'*', (lambda a, b: a * b), flo=True)
or_ = arithmetic_multimethod(u'|', (lambda a, b: a | b))
mod = arithmetic_multimethod(u'%', (lambda a, b: a % b))
and_ = arithmetic_multimethod(u'&', (lambda a, b: a & b))
xor = arithmetic_multimethod(u'xor', (lambda a, b: a ^ b))
min_ = arithmetic_multimethod(u'min', (lambda a, b: min(a, b)), flo=True)
max_ = arithmetic_multimethod(u'max', (lambda a, b: max(a, b)), flo=True)
# min default and max default redefined below.
shl = by_symbol[u'<<'] = Multimethod(2)
coerce_by_default(shl)
shr = by_symbol[u'>>'] = Multimethod(2)
coerce_by_default(shr)
@shl.multimethod_s(Integer, Integer)
def int_shl(a, b):
a_v = int(a.value)
b_v = int(b.value)
    if 0 <= b_v < LONG_BIT:  # a negative b falls through to the error below
c = ovfcheck(a_v << b_v)
return Integer(rffi.r_long(c))
if b_v < 0:
raise space.unwind(space.LError(u"negative shift count"))
# b_v >= LONG_BIT
if a_v == 0:
return a
raise OverflowError
@shr.multimethod_s(Integer, Integer)
def int_shr(a, b):
a_v = a.value
b_v = b.value
    if not (0 <= b_v < LONG_BIT):
if b_v < 0:
raise space.unwind(space.LError(u"negative shift count"))
# b >= LONG_BIT
if a_v == 0:
return a
a_v = -1 if a_v < 0 else 0
else:
a_v = a_v >> b_v
return Integer(a_v)
@mod.multimethod_s(Float, Float)
def float_mod(a, b):
y = b.number
mod = math_fmod(a.number, y) # Follows pypy implementation.
if mod: # I'm not sure why remainder and denominator
if (y < 0.0) != (mod < 0.0): # must have the same sign.
mod += y
else:
mod = copysign(0.0, y)
return Float(mod)
# You get a float if you divide.
div = by_symbol[u'/'] = Multimethod(2)
coerce_by_default(div)
@div.multimethod_s(Integer, Integer)
def _(a, b):
return Float(float(a.value) / float(b.value))
@div.multimethod_s(Float, Float)
def _(a, b):
return Float(a.number / b.number)
# Long-time due.
floordiv = by_symbol[u'//'] = Multimethod(2)
coerce_by_default(floordiv)
@floordiv.multimethod_s(Integer, Integer)
def _(a, b):
return Integer(a.value // b.value)
@floordiv.multimethod_s(Float, Float)
def _(a, b):
return Float(math.floor(a.number / b.number))
# Binary coercion is used in lever arithmetic to turn the left and right
# sides into values that arithmetic can operate on.
#@coerce.multimethod_s(Boolean, Boolean)
#def _(a, b):
# return List([Integer(int(a.flag)), Integer(int(b.flag))])
# There is a discussion that this can actually result in
# hiding of errors and isn't a very nice feature in the retrospect.
# We may have to deprecate the implicit int-bool coercion.
# Let's deprecate them and see what we'll get!
#@coerce.multimethod_s(Integer, Boolean)
#def _(a, b):
# return List([a, Integer(int(b.flag))])
#
#@coerce.multimethod_s(Boolean, Integer)
#def _(a, b):
# return List([Integer(int(a.flag)), b])
@coerce.multimethod_s(Integer, Float)
def _(a, b):
return List([Float(float(a.value)), b])
@coerce.multimethod_s(Float, Integer)
def _(a, b):
return List([a, Float(float(b.value))])
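# e.g. coerce.call([Integer(1), Float(2.5)]) yields List([Float(1.0),
# Float(2.5)]), which is how a mixed `1 + 2.5` ends up dispatching to the
# (Float, Float) arithmetic multimethods defined above.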
@lt.multimethod_s(Integer, Integer)
def cmp_lt(a, b):
return boolean(a.value < b.value)
@gt.multimethod_s(Integer, Integer)
def cmp_gt(a, b):
return boolean(a.value > b.value)
@le.multimethod_s(Integer, Integer)
def cmp_le(a, b):
return boolean(a.value <= b.value)
@ge.multimethod_s(Integer, Integer)
def cmp_ge(a, b):
return boolean(a.value >= b.value)
@lt.multimethod_s(Float, Float)
def cmp_lt(a, b):
return boolean(a.number < b.number)
@gt.multimethod_s(Float, Float)
def cmp_gt(a, b):
return boolean(a.number > b.number)
@le.multimethod_s(Float, Float)
def cmp_le(a, b):
return boolean(a.number <= b.number)
@ge.multimethod_s(Float, Float)
def cmp_ge(a, b):
return boolean(a.number >= b.number)
@lt.multimethod_s(String, String)
def _(a, b):
return boolean(a.string < b.string)
@gt.multimethod_s(String, String)
def _(a, b):
return boolean(a.string > b.string)
@le.multimethod_s(String, String)
def _(a, b):
return boolean(a.string <= b.string)
@ge.multimethod_s(String, String)
def _(a, b):
return boolean(a.string >= b.string)
@signature(Object, Object)
def ne_default(a, b):
return boolean(is_false(eq.call([a, b])))
ne.default = Builtin(ne_default)
@ne.multimethod_s(Integer, Integer)
def _(a, b):
return boolean(a.value != b.value)
@ne.multimethod_s(Float, Float)
def _(a, b):
return boolean(a.number != b.number)
@ne.multimethod_s(String, String)
def _(a, b):
return boolean(a.string != b.string)
# The equality here is a bit convoluted, but it should be good.
@signature(Object, Object)
def eq_default(a, b):
# This strongly enforces the null and boolean identity.
# You can't mess it up by multimethod introductions.
if a == null or b == null:
return boolean(a == b)
elif isinstance(a, Boolean) or isinstance(b, Boolean):
return boolean(a == b)
# This reflects how the cmp_ operates, with an exception that
# if cmp cannot succeed, we will use the identity equality.
args = [a,b]
method = cmp_.fetch_method(args, True)
if method is None:
c = coerce.fetch_method(args, False)
if c is not None:
args = cast(c.call(args), List, u"coerce should return a list").contents
method = cmp_.fetch_method(args, True)
if method is not None:
return boolean(
cast(method.call(args),
Integer, u"cmp should return an int").value == 0)
else:
# This way, the equality and inequality is always defined,
# even if comparison is not defined for everything.
return boolean(a == b)
eq.default = Builtin(eq_default)
@signature(Object, Object)
def lt_default(a, b):
args = [a,b]
method = cmp_.fetch_method(args, True)
if method is not None:
return boolean(
cast(method.call(args),
Integer, u"cmp should return an int").value < 0)
else:
args = cast(coerce.call(args), List,
u"coerce should return a list")
return lt.call_suppressed(args.contents)
lt.default = Builtin(lt_default)
@signature(Object, Object)
def le_default(a, b):
args = [a,b]
method = cmp_.fetch_method(args, True)
if method is not None:
return boolean(
cast(method.call(args),
Integer, u"cmp should return int").value <= 0)
else:
args = cast(coerce.call(args), List,
u"coerce should return a list")
return le.call_suppressed(args.contents)
le.default = Builtin(le_default)
@signature(Object, Object)
def gt_default(a, b):
args = [a,b]
method = cmp_.fetch_method(args, True)
if method is not None:
return boolean(
cast(method.call(args),
Integer, u"cmp should return int").value > 0)
else:
args = cast(coerce.call(args), List,
u"coerce should return a list")
return gt.call_suppressed(args.contents)
gt.default = Builtin(gt_default)
@signature(Object, Object)
def ge_default(a, b):
args = [a,b]
method = cmp_.fetch_method(args, True)
if method is not None:
return boolean(
cast(method.call(args),
Integer, u"cmp should return int").value >= 0)
else:
args = cast(coerce.call(args), List,
u"coerce should return a list")
return ge.call_suppressed(args.contents)
ge.default = Builtin(ge_default)
@signature(Object, Object)
def cmp_default(a, b):
args = cast(coerce.call([a,b]), List,
u"coerce should return a list")
return cmp_.call_suppressed(args.contents)
cmp_.default = Builtin(cmp_default)
@eq.multimethod_s(Integer, Integer)
def _(a, b):
return boolean(a.value == b.value)
@eq.multimethod_s(Float, Float)
def _(a, b):
return boolean(a.number == b.number)
@eq.multimethod_s(Integer, Float)  # Added so they won't bite us in the back soon.
def _(a, b):
return boolean(a.value == b.number)
@eq.multimethod_s(Float, Integer)
def _(a, b):
return boolean(a.number == b.value)
@eq.multimethod_s(String, String)
def _(a, b):
return boolean(a.string == b.string)
@eq.multimethod_s(List, List)
def _(a, b):
if len(a.contents) != len(b.contents):
return false
for i in range(0, len(a.contents)):
if is_false(eq.call([a.contents[i], b.contents[i]])):
return false
return true
@eq.multimethod_s(Id, Id)
def _(a, b):
return boolean(a.ref == b.ref)
@eq.multimethod_s(Slice, Slice)
def _(a, b):
if is_false( eq.call([a.start, b.start]) ):
return false
if is_false( eq.call([a.stop, b.stop]) ):
return false
if is_false( eq.call([a.step, b.step]) ):
return false
return true
@neg.multimethod_s(Integer)
def _(a):
return Integer(-a.value)
@pos.multimethod_s(Integer)
def _(a):
return Integer(+a.value)
@neg.multimethod_s(Float)
def _(a):
return Float(-a.number)
@pos.multimethod_s(Float)
def _(a):
return Float(+a.number)
@concat.multimethod_s(String, String)
def _(a, b):
return String(a.string + b.string)
@concat.multimethod_s(List, List)
def _(a, b):
return List(a.contents + b.contents)
@concat.multimethod_s(Uint8Array, Uint8Array)
@concat.multimethod_s(Uint8Slice, Uint8Array)
@concat.multimethod_s(Uint8Array, Uint8Slice)
@concat.multimethod_s(Uint8Slice, Uint8Slice)
def _(a, b):
c = alloc_uint8array(a.length + b.length)
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, c.uint8data),
rffi.cast(rffi.VOIDP, a.uint8data), a.length)
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, rffi.ptradd(c.uint8data, a.length)),
rffi.cast(rffi.VOIDP, b.uint8data), b.length)
return c
@eq.multimethod_s(Set, Set)
def cmp_eq(a, b):
return boolean(a.eq(b))
@lt.multimethod_s(Set, Set)
def cmp_lt(a, b):
t = setobject.Set_is_superset(b, a)
if space.is_true(t) and len(a._set) != len(b._set):
return space.true
return space.false
@gt.multimethod_s(Set, Set)
def cmp_gt(a, b):
t = setobject.Set_is_superset(a, b)
if space.is_true(t) and len(a._set) != len(b._set):
return space.true
return space.false
@le.multimethod_s(Set, Set)
def cmp_le(a, b):
return setobject.Set_is_superset(b, a)
@ge.multimethod_s(Set, Set)
def cmp_ge(a, b):
return setobject.Set_is_superset(a, b)
@or_.multimethod_s(Set, Set)
def _(a, b):
return setobject.Set_union(a, [b])
@and_.multimethod_s(Set, Set)
def _(a, b):
return setobject.Set_intersection(a, [b])
@sub.multimethod_s(Set, Set)
def _(a, b):
return setobject.Set_difference(a, [b])
@xor.multimethod_s(Set, Set)
def _(a, b):
return setobject.Set_symmetric_difference(a, b)
@clamp.multimethod_s(Slice, Integer, Integer)
def _(c, start, stop):
start, stop, step = c.clamped(start.value, stop.value)
return Slice(Integer(start), Integer(stop), Integer(step))
@mul.multimethod_s(Integer, String)
def _(c, a):
return String(a.string * c.value)
@mul.multimethod_s(String, Integer)
def _(a, c):
return String(a.string * c.value)
def min_default(argv):
if len(argv) == 2 and argv[1] is null:
return argv[0]
if len(argv) == 2 and argv[0] is null:
return argv[1]
args = coerce.call(argv)
if not isinstance(args, List):
raise space.unwind(space.LError(u"coerce should return list"))
return min_.call_suppressed(args.contents)
min_.default = Builtin(min_default)
def max_default(argv):
if len(argv) == 2 and argv[1] is null:
return argv[0]
if len(argv) == 2 and argv[0] is null:
return argv[1]
args = coerce.call(argv)
if not isinstance(args, List):
raise space.unwind(space.LError(u"coerce should return list"))
return max_.call_suppressed(args.contents)
max_.default = Builtin(max_default)
|
0c76c6464bfcf003793567ba4cb0c1b41b3d9276
|
7ae27ce9a8c477855f8fd5fac54685716d868349
|
/invokeai/backend/__init__.py
|
2e77d12eca4d62d5faac3c50327f2d2c9c613475
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
invoke-ai/InvokeAI
|
5f7a2c1f19b1f686099a8cf4cec85aa9c7b6d81d
|
2bd3cf28eabff2dcf3339669be222061dd208cb8
|
refs/heads/main
| 2023-08-31T07:06:56.721576
| 2023-08-30T19:05:17
| 2023-08-30T19:05:17
| 525,592,995
| 15,987
| 1,678
|
Apache-2.0
| 2023-09-14T20:29:39
| 2022-08-17T01:04:27
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 236
|
py
|
__init__.py
|
"""
Initialization file for invokeai.backend
"""
from .model_management import ModelManager, ModelCache, BaseModelType, ModelType, SubModelType, ModelInfo # noqa: F401
from .model_management.models import SilenceWarnings # noqa: F401
|
5d82cd48219d7a0593be41e360001528df9b80a0
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/nxos/show_lag.py
|
a4c9f47b4672594eb0c9e717d4b43eb75c683b5e
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 19,939
|
py
|
show_lag.py
|
"""show_lag.py
supported commands:
* show lacp system-identifier
* show lacp counters
* show lacp neighbor
* show port-channel summary
* show port-channel database
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
# import parser utils
from genie.libs.parser.utils.common import Common
# ============================
# parser for show lacp system-identifier
# ============================
class ShowLacpSystemIdentifierSchema(MetaParser):
"""Schema for show lacp system-identifier"""
schema = {
'system_id_mac': str,
'system_priority': int,
}
class ShowLacpSystemIdentifier(ShowLacpSystemIdentifierSchema):
"""Parser for :
show lacp system-identifier"""
cli_command = 'show lacp system-identifier'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
parsed_dict = {}
# 32768,5e-2-0-1-0-7
p1 = re.compile(r'^\s*(?P<system_priority>[\d]+), *(?P<system_id_mac>[\w.\-]+)$')
for line in out.splitlines():
if line:
line = line.rstrip()
else:
continue
m = p1.match(line)
if m:
group = m.groupdict()
parsed_dict.update({'system_priority': int(group['system_priority'])})
parsed_dict.update({'system_id_mac': group['system_id_mac']})
continue
return parsed_dict
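# e.g. feeding the sample line above ("32768,5e-2-0-1-0-7") through
# ShowLacpSystemIdentifier yields:
#   {'system_priority': 32768, 'system_id_mac': '5e-2-0-1-0-7'}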
# ============================
# schema for show lacp counters
# ============================
class ShowLacpCountersSchema(MetaParser):
"""schema for: show lcap counters"""
schema = {
'interfaces': {
Any(): {
'members': {
Any(): {
'interface': str,
'counters': {
'lacp_in_pkts': int,
'lacp_out_pkts': int,
'lacp_errors': int,
'marker_resp_in_pkts': int,
'marker_resp_out_pkts': int
},
},
}
},
},
}
# =============================
# parser for show lacp counters
# ============================
class ShowLacpCounters(ShowLacpCountersSchema):
"""Parser for: show lacp counters"""
cli_command = 'show lacp counters'
exclude = ['lacp_in_pkts' , 'lacp_out_pkts']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# init return dict
parsed_dict = {}
intf_dict = {}
# port-channel1
p1 = re.compile(r'^port-channel\d+$')
# ------------------------------------------------------------------------------
# LACPDUs Markers / Resp
# LACPDUs
# Port Sent Recv Recv
# Sent Pkts
# Err
# ------------------------------------------------------------------------------
# port - channel1
# Ethernet1 / 1 92 85 0 0
# 0
# Ethernet1 / 2 79 87 0 0
# 0
#
# port - channel2
# Ethernet1 / 3 136 112 0 0
# 0
# Ethernet1 / 4 95 90 0 0
# 0
# Ethernet1 / 5 118 146 0 0
# 0
p2 = re.compile(
r'^(?P<interface>[\w\/]+) +(?P<lacp_out_pkts>[\d]+) +(?P<lacp_in_pkts>[\d]+)'
            r' +(?P<marker_in_pkts>[\d]+) +(?P<marker_out_pkts>[\d]+) +( +('
            r'?P<lacp_pkts_errors>[\d]+))?$')
for line in out.splitlines():
if line:
line = line.strip().replace('\xa0', ' ')
else:
continue
# port-channel1
m = p1.match(line)
if m:
port_channel = Common.convert_intf_name(m.group()).capitalize()
intf_dict = parsed_dict.setdefault('interfaces', {}).setdefault(
port_channel, {})
continue
m = p2.match(line)
if m:
group = m.groupdict()
interface = Common.convert_intf_name(group["interface"]).capitalize()
member_dict = intf_dict.setdefault('members', {}).setdefault(interface,
{})
member_dict.update({'interface': interface})
counter_dict = member_dict.setdefault('counters', {})
counter_dict.update({'lacp_in_pkts': int(group['lacp_in_pkts'])})
counter_dict.update({'lacp_out_pkts': int(group['lacp_out_pkts'])})
counter_dict.update({'marker_resp_in_pkts': int(group['marker_in_pkts'])})
counter_dict.update(
{'marker_resp_out_pkts': int(group['marker_out_pkts'])})
counter_dict.update({'lacp_errors': int(group['lacp_pkts_errors'])})
continue
return parsed_dict
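# e.g. a counters row "Ethernet1/1  92  85  0  0  0" under "port-channel1"
# parses into (values illustrative; the Sent column maps to lacp_out_pkts):
#   {'interfaces': {'Port-channel1': {'members': {'Ethernet1/1': {
#       'interface': 'Ethernet1/1',
#       'counters': {'lacp_out_pkts': 92, 'lacp_in_pkts': 85,
#                    'marker_resp_in_pkts': 0, 'marker_resp_out_pkts': 0,
#                    'lacp_errors': 0}}}}}}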
# =============================
# schema for show lacp neighbor
# ============================
class ShowLacpNeighborSchema(MetaParser):
"""schema for: show lacp neighbor"""
schema = {
'interfaces': {
Any(): {
'members': {
Any(): {
'interface': str,
'activity': str,
'oper_key': int,
'port_num': int,
'partner_id': str,
'age': int,
'interval': str,
'lacp_port_priority': int,
'port_state': int,
},
}
},
},
}
# =============================
# parser for show lacp neighbor
# ============================
class ShowLacpNeighbor(ShowLacpNeighborSchema):
"""parser for: show lacp neighbor"""
cli_command = 'show lacp neighbor'
exclude = ['age']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# init dictionary
parsed_dict = {}
intf_dict = {}
member_dict = {}
# port-channel1 neighbors
p1 = re.compile(r'^(?P<port_channel>[\w-]+)[\xa0 ]+neighbors$')
# Partner Partner Partner
# Port System ID Port Number Age Flags
# Eth1/1 32768,5e-2-0-1-0-7 0x101 1140 SA
p2 = re.compile(
r'^(?P<interface>[\w/]+)[\xa0 ]+\d+,[\xa0 ]*(?P<sys_id>[\w.\-]+)[\xa0 ]+('
r'?P<port_num>0x[a-fA-F0-9]+)[\xa0 ]+(?P<age>\d+)[\xa0 ]+(?P<flags>[\w]+)$')
# LACP Partner Partner Partner
# Port Priority Oper Key Port State
# 32768 0x8000 0x3d
p3 = re.compile(
r'^(?P<lacp_port_priority>\d+)[\xa0 ]+(?P<oper_key>0x[a-fA-F0-9]+)[\xa0 ]+('
r'?P<port_state>0x[a-fA-F0-9]+)$')
for line in out.splitlines():
if line:
line = line.strip()
else:
continue
m = p1.match(line)
if m:
group = m.groupdict()
port_channel = Common.convert_intf_name(
group['port_channel']).capitalize()
intf_dict = parsed_dict.setdefault('interfaces', {}).setdefault(
port_channel, {})
continue
m = p2.match(line)
if m:
group = m.groupdict()
interface = Common.convert_intf_name(group.pop("interface")).capitalize()
member_dict = intf_dict.setdefault('members', {}).setdefault(interface,
{})
member_dict.update({'interface': interface})
flags = group['flags'].lower()
if 'a' in flags:
activity = 'active'
else:
activity = 'passive'
if 's' in flags:
interval = 'slow'
else:
interval = 'fast'
member_dict.update({'interval': interval})
member_dict.update({'activity': activity})
member_dict.update({'port_num': int(group['port_num'], 0)})
member_dict.update({'partner_id': group['sys_id']})
member_dict.update({'age': int(group['age'])})
continue
m = p3.match(line)
if m:
group = m.groupdict()
member_dict.update(
{'lacp_port_priority': int(group['lacp_port_priority'])})
member_dict.update({'oper_key': int(group['oper_key'], 0)})
member_dict.update({'port_state': int(group['port_state'], 0)})
continue
return parsed_dict
# =============================
# schema for show port-channel summary
# ============================
class ShowPortChannelSummarySchema(MetaParser):
"""schema for: show show port-channel summary"""
schema = {
'interfaces': {
Any(): {
'bundle_id': int,
'oper_status': str,
'layer': str, # routed vs switched
'protocol': str,
'type': str,
'members': {
Any(): {
'flags': str,
}
},
},
}
}
# =============================
# parser for show port-channel summary
# ============================
class ShowPortChannelSummary(ShowPortChannelSummarySchema):
"""parser for: show port-channel summary"""
cli_command = 'show port-channel summary'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
parsed_dict = {}
intf_dict = {}
# --------------------------------------------------------------------------------
# Group Port- Type Protocol Member Ports
# Channel
# --------------------------------------------------------------------------------
# 1 Po1(RU) Eth LACP Eth1/1(P) Eth1/2(P)
# 2 Po2(SU) Eth LACP Eth1/3(P) Eth1/4(P) Eth1/5(H)
p1 = re.compile(
r'(?P<bundle_id>[\d]+)[\xa0 ]+(?P<name>[\w\-]+)\((?P<flags>[\w]+)\)?[\xa0 '
r']+(?P<type>\w+)[\xa0 ]+(?P<protocol>[\w\-]+)?[\xa0 ]+(?P<ports>[\w\-/() '
r'\xa0]+ *)?$')
# Eth1/6(P) Eth1/7(P) Eth1/8(H)
p2 = re.compile(
r'^\s*(?P<space>\s{37})(?P<ports>[\w\-\/() \xa0]+)?')
for line in out.splitlines():
if line:
line = line.rstrip()
else:
continue
# 1 Po1(RU) Eth LACP Eth1/1(P) Eth1/2(P)
m = p1.match(line)
if m:
group = m.groupdict()
name = Common.convert_intf_name(group["name"]).capitalize()
intf_dict = parsed_dict.setdefault('interfaces', {}).setdefault(name, {})
intf_dict.update({'bundle_id': int(group["bundle_id"])})
intf_dict.update({'type': group['type'].lower()})
intf_dict.update({'protocol': group['protocol'].lower()})
flags = group['flags'].lower()
intf_dict.update({'layer': 'switched' if 's' in flags else 'routed'})
intf_dict.update({'oper_status': 'up' if 'u' in flags else 'down'})
port_dict = intf_dict.setdefault('members', {})
port_list = re.findall(r'([\w/]+)\((\w+)\)', group['ports'])
for port in port_list:
intf = Common.convert_intf_name(port[0]).capitalize()
port_sub_dict = port_dict.setdefault(intf, {})
port_sub_dict.update({'flags': port[1]})
continue
# Eth1/46(P) Eth1/47(D) Eth1/48(P)
m = p2.match(line)
if m:
group = m.groupdict()
port_list = re.findall(r'([\w/]+)\((\w+)\)', group['ports'])
for port in port_list:
intf = Common.convert_intf_name(port[0]).capitalize()
port_sub_dict = port_dict.setdefault(intf, {})
port_sub_dict.update({'flags': port[1]})
continue
return parsed_dict
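# Illustrative note (added sketch, not part of the original parser): the
# re.findall() call above splits a member-ports string from the sample output
# into (interface, flag) pairs:
#     >>> re.findall(r'([\w/]+)\((\w+)\)', 'Eth1/1(P)       Eth1/2(P)')
#     [('Eth1/1', 'P'), ('Eth1/2', 'P')]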
# =====================================
# schema for show port-channel database
# =====================================
class ShowPortChannelDatabaseSchema(MetaParser):
""" schema for : show post-channel database"""
schema = {
'interfaces': {
Any(): {
'last_update_success': bool, # successful => True, else False
'total_ports': int,
'up_ports': int,
'port_channel_age': str,
'time_last_bundle': str,
'last_bundled_member': str,
Optional('first_oper_port'): str,
Optional('time_last_unbundle'): str,
Optional('last_unbundled_member'): str,
'members': {
Any(): {
'activity': str,
'status': str,
'is_first_oper_port': bool
}
}
}
}
}
# =====================================
# parser for show port-channel database
# =====================================
class ShowPortChannelDatabase(ShowPortChannelDatabaseSchema):
"""parser show port-channel database"""
cli_command = 'show port-channel database'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
parsed_dict = {}
intf_dict = {}
# port-channel1
p1 = re.compile(r'^port-channel\d+$')
# Last membership update is successful
p2 = re.compile(r'^Last +membership +update +is +(?P<last_update_status>\w+)$')
# 2 ports in total, 2 ports up
p3 = re.compile(
r'^(?P<total_ports>\d+) +ports +in +total, +(?P<up_ports>\d+) +ports +up$')
# First operational port is Ethernet1/1
p4 = re.compile(r'^First +operational +port +is +(?P<first_oper_port>[\w/]+)$')
# Age of the port-channel is 0d:02h:31m:22s
p5 = re.compile(
r'^Age +of +the +port-channel +is +(?P<port_channel_age>[0-9:smhd]+)$')
# Time since last bundle is 0d:02h:28m:30s
p6 = re.compile(
r'^Time +since +last +bundle +is +(?P<time_last_bundle>[0-9:smhd]+)$')
# Last bundled member is Ethernet1/2
p7 = re.compile(r'^Last +bundled +member +is +(?P<last_bundled_member>[\w/]+)$')
# Time since last unbundle is 0d:00h:14m:05s
p8 = re.compile(
r'^Time +since +last +unbundle +is +(?P<time_last_unbundle>[0-9:smhd]+)$')
# Last unbundled member is Ethernet1/5
p9 = re.compile(
r'^Last +unbundled +member +is +(?P<last_unbundled_member>[\w/]+)$')
# Ports: Ethernet1/3 [passive] [up]
# Ethernet1/4 [passive] [up] *
# Ethernet1/5 [passive] [hot-standby]
# Ports: Ethernet1/25 [on] [up]
# Ethernet1/26 [on] [up] *
p10 = re.compile(
r'^(Ports:)?\s*(?P<interface>[\w/]+)\s+\[(?P<activity>(passive|active|on|off)) *\] '
r'+\['
r'(?P<status>[\w-]+)\](?P<fop>\s+\*)*$')
for line in out.splitlines():
if line:
line = line.strip()
else:
continue
# port-channel1
m = p1.match(line)
if m:
name = Common.convert_intf_name(m.group()).capitalize()
intf_dict = parsed_dict.setdefault('interfaces', {}).setdefault(name, {})
continue
# Last membership update is successful
m = p2.match(line)
if m:
group = m.groupdict()
intf_dict.update({'last_update_success':
group['last_update_status'] == 'successful'})
continue
# 2 ports in total, 2 ports up
m = p3.match(line)
if m:
group = m.groupdict()
intf_dict.update({'total_ports': int(group['total_ports'])})
intf_dict.update({'up_ports': int(group['up_ports'])})
continue
# First operational port is Ethernet1/1
m = p4.match(line)
if m:
group = m.groupdict()
intf_dict.update({'first_oper_port': group['first_oper_port']})
continue
# Age of the port-channel is 0d:02h:31m:22s
m = p5.match(line)
if m:
group = m.groupdict()
intf_dict.update({'port_channel_age': group['port_channel_age']})
continue
# Time since last bundle is 0d:02h:28m:30s
m = p6.match(line)
if m:
group = m.groupdict()
intf_dict.update({'time_last_bundle': group['time_last_bundle']})
continue
# Last bundled member is Ethernet1/2
m = p7.match(line)
if m:
group = m.groupdict()
intf_dict.update({'last_bundled_member': group['last_bundled_member']})
continue
# Time since last unbundle is 0d:00h:14m:05s
m = p8.match(line)
if m:
group = m.groupdict()
intf_dict.update({'time_last_unbundle': group['time_last_unbundle']})
continue
# Last unbundled member is Ethernet1/5
m = p9.match(line)
if m:
group = m.groupdict()
intf_dict.update(
{'last_unbundled_member': group['last_unbundled_member']})
continue
# Ports: Ethernet1/3 [passive] [up]
# Ethernet1/4 [passive] [up] *
# Ethernet1/5 [passive] [hot-standby]
m = p10.match(line)
if m:
group = m.groupdict()
sub_dict = intf_dict.setdefault('members', {}).setdefault(
group['interface'], {})
sub_dict.update({'activity': group['activity']})
sub_dict.update({'status': group['status']})
sub_dict.update({'is_first_oper_port': bool(group['fop'])})
continue
return parsed_dict
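# Illustrative note (added sketch, not part of the original parser): pattern
# p10 above captures one member row, e.g.
#     >>> p10.match('Ports:   Ethernet1/4    [passive] [up] *').groupdict()
#     {'interface': 'Ethernet1/4', 'activity': 'passive', 'status': 'up', 'fop': ' *'}
# where the trailing '*' marks the first operational port.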
# ---- source file: /govern/data-quality/soda-core/soda/core/soda/execution/check/group_by_check.py (repo: alldatacenter/alldata, Apache-2.0) ----
from __future__ import annotations
import copy
from soda.execution.check.check import Check
from soda.execution.check_outcome import CheckOutcome
from soda.execution.check_type import CheckType
from soda.execution.metric.metric import Metric
from soda.execution.partition import Partition
GROUP_BY_RESULTS = "group_by_results"
class GroupByCheck(Check):
def __init__(
self,
check_cfg: GroupByCheckCfg,
data_source_scan: DataSourceScan,
partition: Partition,
):
super().__init__(
check_cfg=check_cfg,
data_source_scan=data_source_scan,
partition=partition,
column=None,
)
self.check_value = None
self.check_type = CheckType.LOCAL
from soda.execution.metric.group_by_metric import GroupByMetric
group_by_metric = data_source_scan.resolve_metric(
GroupByMetric(
data_source_scan=self.data_source_scan,
partition=partition,
query=self.check_cfg.query,
check=self,
)
)
self.metrics[GROUP_BY_RESULTS] = group_by_metric
def evaluate(self, metrics: dict[str, Metric], historic_values: dict[str, object]):
query_results = metrics[GROUP_BY_RESULTS].value
group_limit = self.check_cfg.group_limit
if len(query_results) > group_limit:
raise Exception(
f"Total number of groups {len(query_results)} exceeds configured group limit: {group_limit}"
)
fields = list(self.check_cfg.fields)
group_check_cfgs = self.check_cfg.check_cfgs
groups = [tuple(map(qr.get, fields)) for qr in query_results]
group_checks = []
for group in groups:
for gcc in group_check_cfgs:
if gcc.name is None:
raise Exception("name property is required for the group check")
group_name = f"{','.join(str(v) for v in group)}"
config = copy.deepcopy(gcc)
config.name = gcc.name + f" [{group_name}]"
config.source_configurations["group_value"] = f"[{group_name}]"
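# Illustrative note (added, not in the original file): for a group
# ('US', 'premium') and a configured check named 'row_count > 0', the
# cloned check is named 'row_count > 0 [US,premium]'.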
column = ",".join(fields)
gc = Check.create(
check_cfg=config, data_source_scan=self.data_source_scan, partition=self.partition, column=column
)
result = next(filter(lambda qr: tuple(map(qr.get, fields)) == group, query_results))
if result is not None:
gc.check_value = result[config.metric_name]
metric = Metric(
self.data_source_scan,
self.partition,
column=None,
name=config.name,
check=None,
identity_parts=[],
)
# TODO fetch historic values, change over time checks will not work yet
# historic_values = {}
# if gc.historic_descriptors:
# for hd_key, hd in gc.historic_descriptors.items():
# print(f"hd_key: {hd_key}, hd: {hd}")
# historic_values[hd_key] = self.data_source_scan.scan.__get_historic_data_from_soda_cloud_metric_store(hd)
metric.set_value(gc.check_value)
self.data_source_scan.scan._add_metric(metric)
gc.metrics = {config.metric_name: metric}
gc.evaluate(metrics=None, historic_values=None)
cloud_group_attr = {
"group": {
"identity": self.create_identity(with_datasource=True, with_filename=True),
"name": gcc.name,
"distinctLabel": group_name,
}
}
gc.cloud_dict.update(cloud_group_attr)
gc.dict.update(cloud_group_attr)
group_checks.append(gc)
self.data_source_scan.scan._checks.extend(group_checks)
if all(gc.outcome == CheckOutcome.PASS for gc in group_checks):
self.outcome = CheckOutcome.PASS
elif any(gc.outcome == CheckOutcome.FAIL for gc in group_checks):
self.outcome = CheckOutcome.FAIL
else:
self.outcome = CheckOutcome.PASS
def get_cloud_diagnostics_dict(self) -> dict:
group_by_diagnostics = {}
return group_by_diagnostics
def get_log_diagnostic_lines(self) -> list[str]:
diagnostics_texts: list[str] = []
return diagnostics_texts
# ---- source file: /thumbor/context.py (repo: thumbor/thumbor, MIT) ----
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from os.path import abspath, exists
from thumbor.filters import FiltersFactory
from thumbor.metrics.logger_metrics import Metrics
from thumbor.threadpool import ThreadPool
# Same logic as Importer. This class is very useful and will remain as-is for now
class Context: # pylint: disable=too-many-instance-attributes
"""
Class responsible for containing:
* Server Configuration Parameters (port, ip, key, etc);
* Configurations read from config file (or defaults);
* Importer with imported modules (engine, filters, detectors, etc);
* Request Parameters (width, height, smart, meta, etc).
Each instance of this class MUST be unique per request.
This class should not be cached in the server.
"""
def __init__(
self, server=None, config=None, importer=None, request_handler=None
):
self.server = server
self.config = config
if importer:
self.modules = ContextImporter(self, importer)
if importer.metrics:
self.metrics = importer.metrics(config)
else:
self.metrics = Metrics(config)
else:
self.modules = None
self.metrics = Metrics(config)
self.app_class = "thumbor.app.ThumborServiceApp"
if hasattr(self.config, "APP_CLASS"):
self.app_class = self.config.APP_CLASS
if (
hasattr(self.server, "app_class")
and self.server.app_class != "thumbor.app.ThumborServiceApp"
):
self.app_class = self.server.app_class
self.filters_factory = FiltersFactory(
self.modules.filters if self.modules else []
)
self.request_handler = request_handler
self.thread_pool = ThreadPool.instance(
getattr(config, "ENGINE_THREADPOOL_SIZE", 0)
)
self.headers = {}
def __enter__(self):
return self
def __exit__(self, exception_type, value, traceback):
if self.modules:
self.modules.cleanup()
self.thread_pool.cleanup()
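# Illustrative note (added, not in the original file): Context supports the
# with-statement, e.g.
#     with Context(server=server, config=config, importer=importer) as ctx:
#         ...
# so module and thread-pool cleanup runs automatically on exit.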
class ServerParameters: # pylint: disable=too-many-instance-attributes
def __init__(
self,
port,
ip, # pylint: disable=invalid-name
config_path,
keyfile,
log_level,
app_class,
debug=False,
fd=None, # pylint: disable=invalid-name
gifsicle_path=None,
use_environment=False,
processes=1,
):
self.port = port
self.ip = ( # Other people may depend on this pylint: disable=invalid-name
ip
)
self.config_path = config_path
self.keyfile = keyfile
self.log_level = log_level
self.app_class = app_class
self.debug = debug
self._security_key = None
self.load_security_key()
self.fd = ( # Other people may depend on this pylint: disable=invalid-name
fd
)
self.gifsicle_path = gifsicle_path
self.use_environment = use_environment
self.processes = processes
@property
def security_key(self):
return self._security_key
@security_key.setter
def security_key(self, key):
self._security_key = key
def load_security_key(self):
if not self.keyfile:
return
path = abspath(self.keyfile)
if not exists(path):
raise ValueError(
(
f"Could not find security key file at {path}. "
"Please verify the keypath argument."
)
)
with open(path, "rb") as security_key_file:
security_key = security_key_file.read().strip()
self.security_key = security_key
class RequestParameters: # pylint: disable=too-few-public-methods,too-many-instance-attributes,too-many-locals
def __init__(
self,
debug=False,
meta=False,
trim=None,
crop_left=None,
crop_top=None,
crop_right=None,
crop_bottom=None,
crop=None,
adaptive=False,
full=False,
fit_in=False,
stretch=False,
width=0,
height=0,
horizontal_flip=False,
vertical_flip=False,
halign="center",
valign="middle",
filters=None,
smart=False,
quality=80,
image=None,
url=None,
extension=None, # pylint: disable=unused-argument
buffer=None, # pylint: disable=unused-argument
focal_points=None,
unsafe=False,
hash=None, # pylint: disable=unused-argument,redefined-builtin
accepts_webp=False,
request=None,
max_age=None,
auto_png_to_jpg=None,
):
self.debug = bool(debug)
self.meta = bool(meta)
self.trim = trim
if trim is not None:
trim_parts = trim.split(":")
self.trim_pos = (
trim_parts[1] if len(trim_parts) > 1 else "top-left"
)
self.trim_tolerance = (
int(trim_parts[2]) if len(trim_parts) > 2 else 0
)
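# Illustrative note (added, not in the original file): a trim value such as
# 'trim:bottom-right:15' yields trim_pos == 'bottom-right' and
# trim_tolerance == 15; a bare 'trim' falls back to 'top-left' and 0.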
if crop is not None:
self.crop = {k: self.int_or_0(v) for k, v in crop.items()}
else:
self.crop = {
"left": self.int_or_0(crop_left),
"right": self.int_or_0(crop_right),
"top": self.int_or_0(crop_top),
"bottom": self.int_or_0(crop_bottom),
}
self.should_crop = (
self.crop["left"] > 0
or self.crop["top"] > 0
or self.crop["right"] > 0
or self.crop["bottom"] > 0
)
self.adaptive = bool(adaptive)
self.full = bool(full)
self.fit_in = bool(fit_in)
self.stretch = bool(stretch)
self.width = "orig" if width == "orig" else self.int_or_0(width)
self.height = "orig" if height == "orig" else self.int_or_0(height)
self.horizontal_flip = bool(horizontal_flip)
self.vertical_flip = bool(vertical_flip)
self.halign = halign or "center"
self.valign = valign or "middle"
self.smart = bool(smart)
if filters is None:
filters = []
self.filters = filters
self.image_url = image
self.url = url
self.detection_error = None
self.quality = quality
self.buffer = None
if focal_points is None:
focal_points = []
self.focal_points = focal_points
self.hash = hash
self.prevent_result_storage = False
self.unsafe = unsafe == "unsafe" or unsafe is True
self.format = None
self.accepts_webp = accepts_webp
self.max_bytes = None
self.max_age = max_age
self.auto_png_to_jpg = auto_png_to_jpg
self.headers = None
if request:
self.url = request.path
self.accepts_webp = "image/webp" in request.headers.get(
"Accept", ""
)
if request.headers:
self.headers = request.headers
@staticmethod
def int_or_0(value):
return 0 if value is None else int(value)
class ContextImporter: # pylint: disable=too-few-public-methods,too-many-instance-attributes
def __init__(self, context, importer):
self.context = context
self.importer = importer
self.engine = None
if importer.engine:
self.engine = importer.engine(context)
self.gif_engine = None
if importer.gif_engine:
self.gif_engine = importer.gif_engine(context)
self.storage = None
if importer.storage:
self.storage = importer.storage(context)
self.result_storage = None
if importer.result_storage:
self.result_storage = importer.result_storage(context)
self.upload_photo_storage = None
if importer.upload_photo_storage:
self.upload_photo_storage = importer.upload_photo_storage(context)
self.loader = importer.loader
self.detectors = importer.detectors
self.filters = importer.filters
self.optimizers = importer.optimizers
self.url_signer = importer.url_signer
self.compatibility_legacy_loader = importer.compatibility_legacy_loader
self.compatibility_legacy_storage = None
if importer.compatibility_legacy_storage is not None:
self.compatibility_legacy_storage = (
importer.compatibility_legacy_storage(context)
)
self.compatibility_legacy_result_storage = None
if importer.compatibility_legacy_result_storage is not None:
self.compatibility_legacy_result_storage = (
importer.compatibility_legacy_result_storage(context)
)
def cleanup(self):
if self.engine:
self.engine.cleanup()
# ---- source file: /tools/converter/tools/testConvertor.py (repo: alibaba/MNN, Apache-2.0) ----
#!/usr/bin/python
import os
import sys
def run(path):
cmd = "find " + path + " -name \"*.pb\" | xargs -I {} ./MNNConvert -f TF --modelFile {} --MNNModel temp.mnn --bizCode test"
# print(cmd)
print(os.popen(cmd).read())
return 0
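# Illustrative note (added, not in the original script): for path='models'
# the assembled shell command is
#   find models -name "*.pb" | xargs -I {} ./MNNConvert -f TF --modelFile {} --MNNModel temp.mnn --bizCode test
# i.e. every TensorFlow .pb file under the tree is converted in turn.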
if __name__ == "__main__":
path = sys.argv[1]
run(path)
# ---- source file: /aliyun-python-sdk-ecd/aliyunsdkecd/request/v20200930/CreateDesktopsRequest.py (repo: aliyun/aliyun-openapi-python-sdk, Apache-2.0) ----
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecd.endpoint import endpoint_data
class CreateDesktopsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ecd', '2020-09-30', 'CreateDesktops')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_VolumeEncryptionKey(self): # String
return self.get_query_params().get('VolumeEncryptionKey')
def set_VolumeEncryptionKey(self, VolumeEncryptionKey): # String
self.add_query_param('VolumeEncryptionKey', VolumeEncryptionKey)
def get_OfficeSiteId(self): # String
return self.get_query_params().get('OfficeSiteId')
def set_OfficeSiteId(self, OfficeSiteId): # String
self.add_query_param('OfficeSiteId', OfficeSiteId)
def get_BundleId(self): # String
return self.get_query_params().get('BundleId')
def set_BundleId(self, BundleId): # String
self.add_query_param('BundleId', BundleId)
def get_UserAssignMode(self): # String
return self.get_query_params().get('UserAssignMode')
def set_UserAssignMode(self, UserAssignMode): # String
self.add_query_param('UserAssignMode', UserAssignMode)
def get_Hostname(self): # String
return self.get_query_params().get('Hostname')
def set_Hostname(self, Hostname): # String
self.add_query_param('Hostname', Hostname)
def get_DesktopNameSuffix(self): # Boolean
return self.get_query_params().get('DesktopNameSuffix')
def set_DesktopNameSuffix(self, DesktopNameSuffix): # Boolean
self.add_query_param('DesktopNameSuffix', DesktopNameSuffix)
def get_DirectoryId(self): # String
return self.get_query_params().get('DirectoryId')
def set_DirectoryId(self, DirectoryId): # String
self.add_query_param('DirectoryId', DirectoryId)
def get_EndUserIds(self): # RepeatList
return self.get_query_params().get('EndUserId')
def set_EndUserIds(self, EndUserId): # RepeatList
for depth1 in range(len(EndUserId)):
self.add_query_param('EndUserId.' + str(depth1 + 1), EndUserId[depth1])
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_BundleModelss(self): # RepeatList
return self.get_query_params().get('BundleModels')
def set_BundleModelss(self, BundleModels): # RepeatList
for depth1 in range(len(BundleModels)):
if BundleModels[depth1].get('VolumeEncryptionEnabled') is not None:
self.add_query_param('BundleModels.' + str(depth1 + 1) + '.VolumeEncryptionEnabled', BundleModels[depth1].get('VolumeEncryptionEnabled'))
if BundleModels[depth1].get('VolumeEncryptionKey') is not None:
self.add_query_param('BundleModels.' + str(depth1 + 1) + '.VolumeEncryptionKey', BundleModels[depth1].get('VolumeEncryptionKey'))
if BundleModels[depth1].get('Amount') is not None:
self.add_query_param('BundleModels.' + str(depth1 + 1) + '.Amount', BundleModels[depth1].get('Amount'))
if BundleModels[depth1].get('DesktopName') is not None:
self.add_query_param('BundleModels.' + str(depth1 + 1) + '.DesktopName', BundleModels[depth1].get('DesktopName'))
if BundleModels[depth1].get('Hostname') is not None:
self.add_query_param('BundleModels.' + str(depth1 + 1) + '.Hostname', BundleModels[depth1].get('Hostname'))
if BundleModels[depth1].get('EndUserIds') is not None:
for depth2 in range(len(BundleModels[depth1].get('EndUserIds'))):
self.add_query_param('BundleModels.' + str(depth1 + 1) + '.EndUserIds.' + str(depth2 + 1), BundleModels[depth1].get('EndUserIds')[depth2])
if BundleModels[depth1].get('BundleId') is not None:
self.add_query_param('BundleModels.' + str(depth1 + 1) + '.BundleId', BundleModels[depth1].get('BundleId'))
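# Illustrative note (added, not in the original SDK file): RepeatList setters
# flatten Python lists into numbered query parameters, e.g.
# set_BundleModelss([{'BundleId': 'b-123', 'Amount': 2}]) adds
# BundleModels.1.BundleId=b-123 and BundleModels.1.Amount=2, and
# set_EndUserIds(['alice', 'bob']) adds EndUserId.1=alice, EndUserId.2=bob.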
def get_VolumeEncryptionEnabled(self): # Boolean
return self.get_query_params().get('VolumeEncryptionEnabled')
def set_VolumeEncryptionEnabled(self, VolumeEncryptionEnabled): # Boolean
self.add_query_param('VolumeEncryptionEnabled', VolumeEncryptionEnabled)
def get_DesktopName(self): # String
return self.get_query_params().get('DesktopName')
def set_DesktopName(self, DesktopName): # String
self.add_query_param('DesktopName', DesktopName)
def get_Amount(self): # Integer
return self.get_query_params().get('Amount')
def set_Amount(self, Amount): # Integer
self.add_query_param('Amount', Amount)
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_UserCommandss(self): # RepeatList
return self.get_query_params().get('UserCommands')
def set_UserCommandss(self, UserCommands): # RepeatList
for depth1 in range(len(UserCommands)):
if UserCommands[depth1].get('ContentEncoding') is not None:
self.add_query_param('UserCommands.' + str(depth1 + 1) + '.ContentEncoding', UserCommands[depth1].get('ContentEncoding'))
if UserCommands[depth1].get('Content') is not None:
self.add_query_param('UserCommands.' + str(depth1 + 1) + '.Content', UserCommands[depth1].get('Content'))
if UserCommands[depth1].get('ContentType') is not None:
self.add_query_param('UserCommands.' + str(depth1 + 1) + '.ContentType', UserCommands[depth1].get('ContentType'))
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_PromotionId(self): # String
return self.get_query_params().get('PromotionId')
def set_PromotionId(self, PromotionId): # String
self.add_query_param('PromotionId', PromotionId)
def get_PeriodUnit(self): # String
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self, PeriodUnit): # String
self.add_query_param('PeriodUnit', PeriodUnit)
def get_AutoRenew(self): # Boolean
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self, AutoRenew): # Boolean
self.add_query_param('AutoRenew', AutoRenew)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType)
def get_PolicyGroupId(self): # String
return self.get_query_params().get('PolicyGroupId')
def set_PolicyGroupId(self, PolicyGroupId): # String
self.add_query_param('PolicyGroupId', PolicyGroupId)
def get_UserName(self): # String
return self.get_query_params().get('UserName')
def set_UserName(self, UserName): # String
self.add_query_param('UserName', UserName)
# ---- source file: /utils/text/general.py (repo: hacktoolkit/django-htk, MIT) ----
def is_alpha(c):
result = ord('A') <= ord(c.upper()) <= ord('Z')
return result
def is_ascii(c):
result = 0 <= ord(c) <= 127
return result
def is_ascii_extended(c):
result = 128 <= ord(c) <= 255
return result
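# Illustrative examples (added, not in the original module):
#     >>> is_alpha('g'), is_alpha('9')
#     (True, False)
#     >>> is_ascii('~'), is_ascii_extended('é')
#     (True, True)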
# ---- source file: /gym_trading/tests/test_broker.py (repo: sadighian/crypto-rl) ----
import unittest
from gym_trading.utils.broker import Broker
from gym_trading.utils.decorator import debugging
from gym_trading.utils.order import LimitOrder, MarketOrder
class MarketOrderTestCases(unittest.TestCase):
@debugging
def test_case_one(self):
print('\nTest_Case_One')
test_position = Broker()
midpoint = 100.
fee = .003
order_open = MarketOrder(ccy='BTC-USD', side='long', price=midpoint, step=1)
test_position.add(order=order_open)
self.assertEqual(1, test_position.long_inventory.position_count)
print('LONG Unrealized_pnl: %f' % test_position.long_inventory.get_unrealized_pnl(
price=midpoint))
self.assertEqual(0, test_position.short_inventory.position_count)
self.assertEqual(0., test_position.short_inventory.get_unrealized_pnl(
price=midpoint))
order_close = MarketOrder(ccy='BTC-USD', side='long',
price=midpoint + (midpoint * fee * 5), step=100)
test_position.remove(order=order_close)
self.assertEqual(0, test_position.long_inventory.position_count)
print('LONG Unrealized_pnl: %f' % test_position.long_inventory.get_unrealized_pnl(
price=midpoint))
self.assertEqual(test_position.short_inventory.position_count, 0)
self.assertEqual(
test_position.short_inventory.get_unrealized_pnl(price=midpoint), 0.)
print('LONG Realized_pnl: %f' % test_position.realized_pnl)
@debugging
def test_case_two(self):
print('\nTest_Case_Two')
test_position = Broker()
midpoint = 100.
fee = .003
order_open = MarketOrder(ccy='BTC-USD', side='short', price=midpoint, step=1)
test_position.add(order=order_open)
self.assertEqual(1, test_position.short_inventory.position_count)
self.assertEqual(0, test_position.long_inventory.position_count)
self.assertEqual(0., test_position.long_inventory.get_unrealized_pnl(
price=midpoint))
print(
'SHORT Unrealized_pnl: %f' % test_position.short_inventory.get_unrealized_pnl(
price=midpoint))
order_close = MarketOrder(ccy='BTC-USD', side='short',
price=midpoint - (midpoint * fee * 15), step=100)
test_position.remove(order=order_close)
self.assertEqual(0, test_position.short_inventory.position_count)
self.assertEqual(0, test_position.long_inventory.position_count)
self.assertEqual(0., test_position.long_inventory.get_unrealized_pnl(
price=midpoint))
print(
'SHORT Unrealized_pnl: %f' % test_position.short_inventory.get_unrealized_pnl(
price=midpoint))
print('SHORT Realized_pnl: %f' % test_position.realized_pnl)
@debugging
def test_case_three(self):
print('\nTest_Case_Three')
test_position = Broker(5)
midpoint = 100.
for i in range(10):
order_open = MarketOrder(ccy='BTC-USD', side='long', price=midpoint - i, step=i)
test_position.add(order=order_open)
self.assertEqual(5, test_position.long_inventory.position_count)
self.assertEqual(0, test_position.short_inventory.position_count)
print('Confirm we have 5 positions: %i' % test_position.long_inventory.position_count)
for i in range(10):
order_open = MarketOrder(ccy='BTC-USD', side='long', price=midpoint + i, step=i)
test_position.remove(order=order_open)
self.assertEqual(0, test_position.long_inventory.position_count)
self.assertEqual(0, test_position.short_inventory.position_count)
class LimitOrderTestCases(unittest.TestCase):
@debugging
def test_long_pnl(self):
test_position = Broker()
step = 0
bid_price = 101.
ask_price = 102.
buy_volume = 100
sell_volume = 100
pnl = 0.
def walk_forward(pnl, step, bid_price, ask_price, buy_volume, sell_volume, down=True):
for i in range(50):
step += 1
if down:
bid_price *= 0.99
ask_price *= 0.99
else:
bid_price *= 1.01
ask_price *= 1.01
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
if i % 10 == 0:
print('bid_price={:.2f} | ask_price={:.2f}'.format(bid_price,
ask_price))
return step, bid_price, ask_price, buy_volume, sell_volume, pnl
test_position.add(
order=LimitOrder(ccy='BTC-USD', side='long', price=100., step=step,
queue_ahead=1000))
step, _, _, buy_volume, sell_volume, pnl = walk_forward(pnl, step, bid_price,
ask_price, buy_volume,
sell_volume, down=True)
self.assertEqual(1, test_position.long_inventory_count)
test_position.add(
order=LimitOrder(ccy='BTC-USD', side='short', price=105., step=step,
queue_ahead=0))
_, _, _, _, _, pnl = walk_forward(pnl, step, bid_price, ask_price, buy_volume,
sell_volume, down=False)
realized_pnl = round(test_position.realized_pnl, 3)
self.assertEqual(0.05, realized_pnl,
"Expected Realized PnL of 0.05 and got {}".format(realized_pnl))
self.assertEqual(0,
test_position.short_inventory_count +
test_position.long_inventory_count)
print("PnL: {}".format(pnl))
@debugging
def test_avg_exe(self):
test_position = Broker()
# perform a partial fill on the first order
step = 0
bid_price = 101.
ask_price = 102.
buy_volume = 500
sell_volume = 500
test_position.add(
order=LimitOrder(ccy='BTC-USD', side='long', price=bid_price, step=step,
queue_ahead=0))
print("taking first step...")
step += 1
pnl = 0.
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
self.assertEqual(500, test_position.long_inventory.order.executed)
self.assertEqual(0, test_position.long_inventory_count)
# if order gets filled with a bid below the order's price, the order should NOT
# receive any price improvement during the execution.
bid_price = 99.
ask_price = 100.
test_position.add(
order=LimitOrder(ccy='BTC-USD', side='long', price=bid_price, step=step,
queue_ahead=0))
print("taking second step...")
step += 1
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
self.assertEqual(1, test_position.long_inventory_count)
self.assertEqual(100., test_position.long_inventory.average_price)
print("PnL: {}".format(pnl))
@debugging
def test_lob_queuing(self):
test_position = Broker()
# perform a partial fill on the first order
step = 0
bid_price = 102.
ask_price = 103.
buy_volume = 500
sell_volume = 500
queue_ahead = 800
order_open = LimitOrder(ccy='BTC-USD', side='long', price=bid_price, step=step,
queue_ahead=queue_ahead)
test_position.add(order=order_open)
step += 1
pnl = 0.
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
print("#1 long_inventory.order = \n{}".format(test_position.long_inventory.order))
self.assertEqual(300, test_position.long_inventory.order.queue_ahead)
self.assertEqual(0, test_position.long_inventory.order.executed)
self.assertEqual(0, test_position.long_inventory_count)
step += 1
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
print("#2 long_inventory.order = \n{}".format(test_position.long_inventory.order))
self.assertEqual(200, test_position.long_inventory.order.executed)
self.assertEqual(0, test_position.long_inventory_count)
# if order gets filled with a bid below the order's price, the order should NOT
# receive any price improvement during the execution.
bid_price = 100.
ask_price = 102.
order_open = LimitOrder(ccy='BTC-USD', side='long', price=bid_price, step=step,
queue_ahead=queue_ahead)
test_position.add(order=order_open)
print("#3 long_inventory.order = \n{}".format(test_position.long_inventory.order))
self.assertEqual(0, test_position.long_inventory_count)
bid_price = 100.
for i in range(5):
step += 1
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
self.assertEqual(1, test_position.long_inventory_count)
self.assertEqual(100.40, round(test_position.long_inventory.average_price, 2))
print("PnL: {}".format(pnl))
@debugging
def test_queues_ahead_features(self):
test_position = Broker()
# perform a partial fill on the first order
step = 0
bid_price = 100.
ask_price = 200.
buy_volume = 0
sell_volume = 0
order_open_long = LimitOrder(ccy='BTC-USD', side='long', price=bid_price,
step=step, queue_ahead=0)
order_open_short = LimitOrder(ccy='BTC-USD', side='short', price=ask_price,
step=step, queue_ahead=2000)
print('opening long position = {}'.format(order_open_long))
test_position.add(order=order_open_long)
print('opening short position = {}'.format(order_open_short))
test_position.add(order=order_open_short)
print('\ntaking first step...')
step += 1
pnl = 0.
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
print("#1 long_inventory.order = \n{}".format(test_position.long_inventory.order))
print(
"#1 short_inventory.order = \n{}".format(test_position.short_inventory.order))
bid_queue, ask_queue = test_position.get_queues_ahead_features()
print("#1 get_queues_ahead_features:\nbid_queue={} || ask_queue={}".format(
bid_queue, ask_queue))
self.assertEqual(0., bid_queue)
self.assertEqual(-0.67, round(ask_queue, 2))
print('\ntaking second step...')
buy_volume = 500
sell_volume = 500
step += 1
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
print("#2 long_inventory.order = \n{}".format(test_position.long_inventory.order))
print(
"#2 short_inventory.order = \n{}".format(test_position.short_inventory.order))
bid_queue, ask_queue = test_position.get_queues_ahead_features()
print("#2 get_queues_ahead_features:\nbid_queue={} || ask_queue={}".format(
bid_queue, ask_queue))
self.assertEqual(0.5, bid_queue)
self.assertEqual(-0.6, round(ask_queue, 2))
print('\ntaking third step...')
buy_volume = 500
sell_volume = 499
step += 1
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
print("#3 long_inventory.order = \n{}".format(test_position.long_inventory.order))
print(
"#3 short_inventory.order = \n{}".format(test_position.short_inventory.order))
bid_queue, ask_queue = test_position.get_queues_ahead_features()
print("#3 get_queues_ahead_features:\nbid_queue={} || ask_queue={}".format(
bid_queue, ask_queue))
self.assertEqual(0.999, bid_queue)
self.assertEqual(-0.5, round(ask_queue, 2))
print('\ntaking fourth step...')
buy_volume = 500
sell_volume = 500
step += 1
step_pnl, is_long_order_filled, is_short_order_filled = \
test_position.step_limit_order_pnl(
bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
sell_volume=sell_volume, step=step)
pnl += step_pnl
print("#4 long_inventory.order = \n{}".format(test_position.long_inventory.order))
print(
"#4 short_inventory.order = \n{}".format(test_position.short_inventory.order))
bid_queue, ask_queue = test_position.get_queues_ahead_features()
print("#4 get_queues_ahead_features:\nbid_queue={} || ask_queue={}".format(
bid_queue, ask_queue))
self.assertEqual(0.0, bid_queue)
self.assertEqual(-0.33, round(ask_queue, 2))
print("PnL: {}".format(pnl))
if __name__ == '__main__':
unittest.main()
# ---- source file: /pymeasure/instruments/velleman/velleman_k8090.py (repo: pymeasure/pymeasure, MIT) ----
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2023 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from enum import IntFlag
import logging
from pyvisa import VisaIOError
from pymeasure.adapters import SerialAdapter, VISAAdapter
from pymeasure.instruments import Instrument
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class VellemanK8090Switches(IntFlag):
"""Use to identify switch channels."""
NONE = 0
CH1 = 1 << 0
CH2 = 1 << 1
CH3 = 1 << 2
CH4 = 1 << 3
CH5 = 1 << 4
CH6 = 1 << 5
CH7 = 1 << 6
CH8 = 1 << 7
ALL = CH1 | CH2 | CH3 | CH4 | CH5 | CH6 | CH7 | CH8
def _parse_channels(channels) -> str:
"""Convert array of channel numbers into mask if needed."""
if isinstance(channels, list):
mask = VellemanK8090Switches.NONE
for ch in channels:
mask |= 1 << (ch - 1)
else:
mask = channels
return hex(mask)
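# Worked example (added, not in the original module):
#     >>> _parse_channels([1, 3])   # bits 0 and 2 set -> 0b101
#     '0x5'
#     >>> _parse_channels(VellemanK8090Switches.ALL)
#     '0xff'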
def _get_process_status(items):
"""Process the result of a 0x51 status message.
:param items: List of 4 integers: [CMD, MASK, Param1, Param2]
"""
if len(items) < 4 or items[0] != 0x51:
return None, None, None
return [VellemanK8090Switches(it) for it in items[1:]]
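# Worked example (added, not in the original module): a status reply
# [0x51, 0b101, 0b001, 0b000] decodes to three flag values: previous state
# CH3|CH1, current state CH1, and timer state NONE.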
class VellemanK8090(Instrument):
"""For usage with the K8090 relay board, by Velleman.
View the "K8090/VM8090 PROTOCOL MANUAL" for the serial command instructions.
The communication is done by serial USB. The IO settings are fixed:
================== ==================
Baud rate 19200
Data bits 8
Parity None
Stop bits 1
Flow control None
================== ==================
A short timeout is recommended, since the device is not consistent in giving
status messages and serial timeouts will also occur in normal operation.
Use the class like:
.. code-block:: python
from pymeasure.instruments.velleman import VellemanK8090, VellemanK8090Switches as Switches
instrument = VellemanK8090("ASRL1::INSTR")
# Get status update from device
last_on, curr_on, time_on = instrument.status
# Toggle a selection of channels on
instrument.switch_on = Switches.CH3 | Switches.CH4 | Switches.CH5
"""
def __init__(self, adapter, name="Velleman K8090", timeout=100, **kwargs):
super().__init__(
adapter,
name=name,
asrl={"baud_rate": 19200},
write_termination="",
read_termination="",
timeout=timeout,
**kwargs,
)
BYTE_STX = 0x04
BYTE_ETX = 0x0F
version = Instrument.measurement(
"0x71",
"""
Get firmware version, as (year - 2000, week). E.g. ``(10, 1)``
""",
cast=int,
get_process=lambda v: (v[2], v[3]) if len(v) > 3 and v[0] == 0x71 else None,
)
status = Instrument.measurement(
"0x18",
"""
Get current relay status.
The reply has a different command byte than the request.
Three items (:class:`VellemanK8090Switches` flags) are returned:
* Previous state: the state of each relay before this event
* Current state: the state of each relay now
* Timer state: the state of each relay timer
""",
cast=int,
get_process=_get_process_status,
)
switch_on = Instrument.setting(
"0x11,%s",
""""
Switch on a set of channels. Other channels are unaffected.
Pass either a list or set of channel numbers (starting at 1), or pass a bitmask.
After switching this waits for a reply from the device. This is only send when
a relay actually toggles, otherwise expect a blocking time equal to the
communication timeout
If speed is important, avoid calling `switch_` unnecessarily.
""",
set_process=_parse_channels,
check_set_errors=True,
)
switch_off = Instrument.setting(
"0x12,%s",
"""
Switch off a set of channels. See :attr:`switch_on` for more details.
""",
set_process=_parse_channels,
check_set_errors=True,
)
id = None # No identification available
def _make_checksum(self, command, mask, param1, param2):
# The formula from the sheet requires twos-complement negation,
# this works
return 1 + 0xFF - ((self.BYTE_STX + command + mask + param1 + param2) & 0xFF)
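# Worked example (added, not in the original module): for the status query
# CMD=0x18 with MASK=PARAM1=PARAM2=0, the byte sum is 0x04 + 0x18 = 0x1C,
# so CHK = 1 + 0xFF - 0x1C = 0xE4.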
def write(self, command, **kwargs):
"""The write command specifically for the protocol of the K8090.
This overrides the method from the ``Instrument`` class.
Each packet to the device is 7 bytes:
STX (0x04) - CMD - MASK - PARAM1 - PARAM2 - CHK - ETX (0x0F)
Where `CHK` is checksum of the package.
:param command: String like "CMD[, MASK, PARAM1, PARAM2]" - only CMD is mandatory
:type command: str
"""
# The device can give status updates when we don't expect it,
# drop anything from the buffer first
if isinstance(self.adapter, VISAAdapter):
self.adapter.flush_read_buffer()
elif isinstance(self.adapter, SerialAdapter):
# The SerialAdapter does not have `flush_read_buffer` implemented
self.adapter.connection.flush()
items_str = command.split(",")
items = [int(it, 16) for it in items_str]
cmd = items[0]
mask = items[1] if len(items) > 1 else 0
param1 = items[2] if len(items) > 2 else 0
param2 = items[3] if len(items) > 3 else 0
checksum = self._make_checksum(cmd, mask, param1, param2)
content = [
self.BYTE_STX,
cmd,
mask,
param1,
param2,
checksum,
self.BYTE_ETX,
]
self.write_bytes(bytes(content))
def read(self, **kwargs):
"""The read command specifically for the protocol of the K8090.
This overrides the method from the ``Instrument`` class.
See :meth:`write`, replies from the machine use the same format.
A read will return a list of CMD, MASK, PARAM1 and PARAM2.
"""
# A message is always 7 bytes
# (there is also a termination char, but since it is not exclusive it cannot be
# reliably used)
response = self.read_bytes(7)
if len(response) < 7:
raise ConnectionError(f"Incoming packet was {len(response)} bytes instead of 7")
# Only consider the most recent block
stx, command, mask, param1, param2, checksum, etx = list(response[-7:])
if stx != self.BYTE_STX or etx != self.BYTE_ETX:
raise ConnectionError(f"Received invalid start and stop bytes `{stx}` and `{etx}`")
if command == 0x00:
raise ConnectionError(f"Received invalid command byte `{command}`")
real_checksum = self._make_checksum(command, mask, param1, param2)
if real_checksum != checksum:
raise ConnectionError(
f"Packet checksum was not correct, got {hex(checksum)} "
f"instead of {hex(real_checksum)}"
)
values_str = [str(v) for v in [command, mask, param1, param2]]
return ",".join(values_str)
def check_set_errors(self):
"""Check for errors after having set a property and log them.
Called if :code:`check_set_errors=True` is set for that property.
The K8090 replies with a status after a switch command, but
**only** after any switch actually changed. In order to guarantee
the buffer is empty, we attempt to read it fully here.
No actual error checking is done here!
:return: List of error entries.
"""
try:
self.read()
except (VisaIOError, ConnectionError):
pass # Ignore a timeout
except Exception as exc:
log.exception("Setting a property failed.", exc_info=exc)
raise
else:
return []
# ---- source file: /winpython/_vendor/qtpy/tests/QtDesigner.py (repo: winpython/winpython, MIT) ----
# -----------------------------------------------------------------------------
# Copyright © 2014-2015 Colin Duquesnoy
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Provides QtDesigner classes and functions."""
from . import (
PYQT5,
PYQT6,
PYSIDE2,
PYSIDE6,
QtBindingMissingModuleError,
)
if PYQT5:
from PyQt5.QtDesigner import *
elif PYQT6:
from PyQt6.QtDesigner import *
elif PYSIDE2:
raise QtBindingMissingModuleError(name='QtDesigner')
elif PYSIDE6:
from PySide6.QtDesigner import *
# ---- source file: /src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_functionapp_access_restriction_commands.py (repo: Azure/azure-cli, MIT) ----
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-few-public-methods
import json
import unittest
import jmespath
from azure.cli.core.azclierror import (ResourceNotFoundError, ArgumentUsageError, InvalidArgumentValueError,
MutuallyExclusiveArgumentError)
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck, StorageAccountPreparer)
from knack.cli import CLIError
from knack.log import get_logger
logger = get_logger(__name__)
WINDOWS_ASP_LOCATION_WEBAPP = 'japanwest'
WINDOWS_ASP_LOCATION_FUNCTIONAPP = 'francecentral'
LINUX_ASP_LOCATION_WEBAPP = 'eastus2'
LINUX_ASP_LOCATION_FUNCTIONAPP = 'ukwest'
class FunctionAppAccessRestrictionScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_show(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa} --functions-version 4', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction show -g {rg} -n {app_name}', checks=[
JMESPathCheck('length(@)', 3),
JMESPathCheck('length(ipSecurityRestrictions)', 1),
JMESPathCheck('ipSecurityRestrictions[0].name', 'Allow all'),
JMESPathCheck('ipSecurityRestrictions[0].action', 'Allow'),
JMESPathCheck('length(scmIpSecurityRestrictions)', 1),
JMESPathCheck('scmIpSecurityRestrictions[0].name', 'Allow all'),
JMESPathCheck('scmIpSecurityRestrictions[0].action', 'Allow'),
JMESPathCheck('scmIpSecurityRestrictionsUseMain', False)
])
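# Illustrative note (added, not in the original test file): each JMESPathCheck
# runs a JMESPath query against the command's JSON output, e.g.
# length(ipSecurityRestrictions) == 1 asserts that exactly one main-site rule
# exists right after creation.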
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_set_simple(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa} --functions-version 4', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction set -g {rg} -n {app_name} --use-same-restrictions-for-scm-site true', checks=[
JMESPathCheck('scmIpSecurityRestrictionsUseMain', True)
])
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_set_complex(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa} --functions-version 4', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction set -g {rg} -n {app_name} --use-same-restrictions-for-scm-site', checks=[
JMESPathCheck('scmIpSecurityRestrictionsUseMain', True)
])
self.cmd('functionapp config access-restriction set -g {rg} -n {app_name} --use-same-restrictions-for-scm-site false', checks=[
JMESPathCheck('scmIpSecurityRestrictionsUseMain', False)
])
@ResourceGroupPreparer(random_name_length=17, parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
# random_name_length is temporary until the bug fix in the API is deployed successfully & then should be removed.
@StorageAccountPreparer()
def test_functionapp_access_restriction_add(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa} --functions-version 4', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200', checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('[0].name', 'developers'),
JMESPathCheck('[0].action', 'Allow'),
JMESPathCheck('[1].name', 'Deny all'),
JMESPathCheck('[1].action', 'Deny')
])
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_add_ip_address_validation(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa} --functions-version 4', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction add -g {rg} -n {app_name} --rule-name ipv4 --action Allow --ip-address 130.220.0.0 --priority 200', checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('[0].name', 'ipv4'),
JMESPathCheck('[0].action', 'Allow'),
JMESPathCheck('[0].ipAddress', '130.220.0.0/32'),
JMESPathCheck('[1].name', 'Deny all'),
JMESPathCheck('[1].action', 'Deny')
])
self.cmd('functionapp config access-restriction add -g {rg} -n {app_name} --rule-name ipv6 --action Allow --ip-address 2004::1000 --priority 200', checks=[
JMESPathCheck('length(@)', 3),
JMESPathCheck('[1].name', 'ipv6'),
JMESPathCheck('[1].action', 'Allow'),
JMESPathCheck('[1].ipAddress', '2004::1000/128')
])
@unittest.skip("Invalid test case that cannot pass in the live mode.")
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_add_service_endpoint(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'plan_name': self.create_random_name(prefix='cli-plan-nwr', length=24),
'vnet_name': self.create_random_name(prefix='cli-vnet-nwr', length=24)
})
self.cmd('appservice plan create -g {rg} -n {plan_name}')
self.cmd('functionapp create -g {rg} -n {app_name} --plan {plan_name} -s {sa}', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('az network vnet create -g {rg} -n {vnet_name} --address-prefixes 10.0.0.0/16 --subnet-name endpoint-subnet --subnet-prefixes 10.0.0.0/24', checks=[
JMESPathCheck('subnets[0].serviceEndpoints', None)
])
self.cmd('functionapp config access-restriction add -g {rg} -n {app_name} --rule-name vnet-integration --action Allow --vnet-name {vnet_name} --subnet endpoint-subnet --priority 150', checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('[0].name', 'vnet-integration'),
JMESPathCheck('[0].action', 'Allow'),
JMESPathCheck('[1].name', 'Deny all'),
JMESPathCheck('[1].action', 'Deny')
])
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_remove(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa} --functions-version 4', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200', checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('[0].name', 'developers'),
JMESPathCheck('[0].action', 'Allow'),
JMESPathCheck('[1].name', 'Deny all'),
JMESPathCheck('[1].action', 'Deny')
])
self.cmd('functionapp config access-restriction remove -g {rg} -n {app_name} --rule-name developers', checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', 'Allow all'),
JMESPathCheck('[0].action', 'Allow')
])
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_add_scm(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa} --functions-version 4', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200 --scm-site', checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('[0].name', 'developers'),
JMESPathCheck('[0].action', 'Allow'),
JMESPathCheck('[1].name', 'Deny all'),
JMESPathCheck('[1].action', 'Deny')
])
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_remove_scm(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa} --functions-version 4', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200 --scm-site', checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('[0].name', 'developers'),
JMESPathCheck('[0].action', 'Allow'),
JMESPathCheck('[1].name', 'Deny all'),
JMESPathCheck('[1].action', 'Deny')
])
self.cmd('functionapp config access-restriction remove -g {rg} -n {app_name} --rule-name developers --scm-site', checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', 'Allow all'),
JMESPathCheck('[0].action', 'Allow')
])
@unittest.skip("Function app slot shouldn't use webapp")
@ResourceGroupPreparer(parameter_name_for_location='location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_access_restriction_slot(self, resource_group, location):
self.kwargs.update({
'app_name': self.create_random_name(prefix='cli-funcapp-nwr', length=24),
'loc': location,
'slot_name': 'stage'
})
self.cmd('functionapp create -g {rg} -n {app_name} --consumption-plan-location {loc} -s {sa}', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp deployment slot create -g {rg} -n {app_name} --slot {slot_name}', checks=[
JMESPathCheck('state', 'Running')
])
self.cmd('functionapp config access-restriction show -g {rg} -n {app_name} --slot {slot_name}', checks=[
JMESPathCheck('length(@)', 3),
JMESPathCheck('length(ipSecurityRestrictions)', 1),
JMESPathCheck('ipSecurityRestrictions[0].name', 'Allow all'),
JMESPathCheck('ipSecurityRestrictions[0].action', 'Allow'),
JMESPathCheck('length(scmIpSecurityRestrictions)', 1),
JMESPathCheck('scmIpSecurityRestrictions[0].name', 'Allow all'),
JMESPathCheck('scmIpSecurityRestrictions[0].action', 'Allow'),
JMESPathCheck('scmIpSecurityRestrictionsUseMain', False)
])
self.cmd('functionapp config access-restriction add -g {rg} -n {app_name} --rule-name developers --action Allow --ip-address 130.220.0.0/27 --priority 200 --slot {slot_name}', checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('[0].name', 'developers'),
JMESPathCheck('[0].action', 'Allow'),
JMESPathCheck('[1].name', 'Deny all'),
JMESPathCheck('[1].action', 'Deny')
])
if __name__ == '__main__':
unittest.main()
repo: walkccc/LeetCode | path: /solutions/0109. Convert Sorted List to Binary Search Tree/0109.py | license: MIT | language: Python | size: 574 bytes
class Solution:
  def sortedListToBST(self, head: ListNode) -> TreeNode:
    def findMid(head: ListNode) -> ListNode:
      # Fast/slow pointers: `slow` ends on the middle node while `prev`
      # trails it so the left half can be severed for the recursion.
      prev = None
      slow = head
      fast = head
      while fast and fast.next:
        prev = slow
        slow = slow.next
        fast = fast.next.next
      prev.next = None  # cut the list just before the middle node
      return slow

    if not head:
      return None
    if not head.next:
      return TreeNode(head.val)

    mid = findMid(head)
    root = TreeNode(mid.val)  # the middle element becomes the root
    root.left = self.sortedListToBST(head)       # left half (already severed)
    root.right = self.sortedListToBST(mid.next)  # right half
    return root
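# Minimal stubs and a usage sketch. ListNode/TreeNode are provided by the
# LeetCode judge; the definitions below are assumptions added so the snippet
# can run standalone (in that case they must precede the Solution class, or
# `from __future__ import annotations` must be in effect).
class ListNode:
  def __init__(self, val=0, next=None):
    self.val = val
    self.next = next


class TreeNode:
  def __init__(self, val=0, left=None, right=None):
    self.val = val
    self.left = left
    self.right = right


head = None
for v in reversed([-10, -3, 0, 5, 9]):
  head = ListNode(v, head)
root = Solution().sortedListToBST(head)
assert root.val == 0  # the middle element becomes the root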
repo: clbx/Cosmic | path: /asm/cosasm.py | license: MIT, Unlicense | language: Python | size: 14,923 bytes
import sys
import math
import re
#The Instruction Set
InstructionSet = {
"IMP NOP":0x00,
"IMP HCF":0x01,
"IMP PUSH":0x02,
"IMP POP":0x03,
"REG SWP":0x04,
"IMM CALL":0x05,
"ABS CALL":0x06,
"IND CALL":0x07,
"IMP RET":0x08,
"IMM ADD":0x10,
"ABS ADD":0x11,
"IND ADD":0x12,
"REG ADD":0x13,
"IMM ADDX":0x14,
"ABS ADDX":0x15,
"IND ADDX":0x16,
"REG ADDX":0x17,
"IMM SUB":0x18,
"ABS SUB":0x19,
"IND SUB":0x1A,
"REG SUB":0x1B,
"IMM SUBX":0x1C,
"ABS SUBX":0x1D,
"IND SUBX":0x1E,
"REG SUBX":0x1F,
"IMM MUL":0x20,
"ABS MUL":0x21,
"IND MUL":0x22,
"REG MUL":0x23,
"IMM MULX":0x24,
"ABS MULX":0x25,
"IND MULX":0x26,
"REG MULX":0x27,
"IMM DIV":0x28,
"ABS DIV":0x29,
"IND DIV":0x2A,
"REG DIV":0x2B,
"IMM DIVX":0x2C,
"ABS DIVX":0x2D,
"IND DIVX":0x2E,
"REG DIVX":0x2F,
"IMM ABS MOV":0x30,
"ABS ABS MOV":0x31,
"IND ABS MOV":0x32,
"REG ABS MOV":0x33,
"IMM IND MOV":0x34,
"ABS IND MOV":0x35,
"IND IND MOV":0x36,
"REG IND MOV":0x37,
"IMM REG MOV":0x38,
"ABS REG MOV":0x39,
"IND REG MOV":0x3A,
"REG REG MOV":0x3B,
"IMM SHL":0x3C,
"ABS SHL":0x3D,
"IND SHL":0x3E,
"REG SHL":0x3F,
"IMM ABS MOVX":0x40,
"ABS ABS MOVX":0x41,
"IND ABS MOVX":0x42,
"REG ABS MOVX":0x43,
"IMM IND MOVX":0x44,
"ABS IND MOVX":0x45,
"IND IND MOVX":0x46,
"REG IND MOVX":0x47,
"IMM REG MOVX":0x48,
"ABS REG MOVX":0x49,
"IND REG MOVX":0x4A,
"REG REG MOVX":0x4B,
"IMM SHLX":0x4C,
"ABS SHLX":0x4D,
"IND SHLX":0x4E,
"REG SHLX":0x4F,
"IMM AND":0x50,
"ABS AND":0x51,
"IND AND":0x52,
"REG AND":0x53,
"IMM OR":0x54,
"ABS OR":0x55,
"IND OR":0x56,
"REG OR":0x57,
"IMM XOR":0x58,
"ABS XOR":0x59,
"IND XOR":0x5A,
"REG XOR":0x5B,
"IMM SHR":0x5C,
"ABS SHR":0x5D,
"IND SHR":0x5E,
"REG SHR":0x5F,
"IMM CMP":0x60,
"ABS CMP":0x61,
"IND CMP":0x62,
"REG CMP":0x63,
"IMM CMPX":0x64,
"ABS CMPX":0x65,
"IND CMPX":0x66,
"REG CMPX":0x67,
"IMM SHRX":0x6C,
"ABS SHRX":0x6D,
"IND SHRX":0x6E,
"REG SHRX":0x6F,
"IMM JMP":0x70,
"ABS JMP":0x71,
"IND JMP":0x72,
"REG JMP":0x73,
"IMM JZS":0x74,
"ABS JZS":0x75,
"IND JZS":0x76,
"REG JZS":0x77,
"IMM JNZ":0x78,
"ABS JNZ":0x79,
"IND JNZ":0x7A,
"REG JNZ":0x7B,
"IMM JCS":0x7C,
"ABS JCS":0x7D,
"IND JCS":0x7E,
"REG JCS":0x7F,
"IMM JNC":0x80,
"ABS JNC":0x81,
"IND JNC":0x82,
"REG JNC":0x83,
"IMM JOS":0x84,
"ABS JOS":0x85,
"IND JOS":0x86,
"REG JOS":0x87,
"IMM JNO":0x88,
"ABS JNO":0x89,
"IND JNO":0x8A,
"REG JNO":0x8B,
"IMM JNS":0x8C,
"ABS JNS":0x8D,
"IND JNS":0x8E,
"REG JNS":0x8F,
"IMM JNN":0x90,
"ABS JNN":0x91,
"IND JNN":0x92,
"REG JNN":0x93,
"IMM JLS":0x94,
"ABS JLS":0x95,
"IND JLS":0x96,
"REG JLS":0x97,
"IMM JNL":0x98,
"ABS JNL":0x99,
"IND JNL":0x9A,
"REG JNL":0x9B,
"IMM JES":0x9C,
"ABS JES":0x9D,
"IND JES":0x9E,
"REG JES":0x9F,
"IMP CSF":0xA0,
"IMP CZF":0xA1,
"IMP SZF":0xA2,
"IMP CNF":0xA3,
"IMP SNF":0xA4,
"IMP CCF":0xA5,
"IMP SCF":0xA6,
"IMP COF":0xA7,
"IMP SOF":0xA8,
"IMP CLF":0xA9,
"IMP SLF":0xAA,
"IMP CIF":0xAB,
"IMP SIF":0xAC,
"IMP CEF":0xAD,
"ABS INC":0xB0,
"IND INC":0xB1,
"REG INC":0xB2,
"ABS INCX":0xB3,
"IND INCX":0xB4,
"REG INCX":0xB5,
"ABS DEC":0xB6,
"IND DEC":0xB7,
"REG DEC":0xB8,
"ABS DECX":0xB9,
"IND DECX":0xBA,
"REG DECX":0xBB
}
opcodePattern = re.compile("[A-Za-z]{3,4}( [#@R]?[0-9A-F]+( [#@R]?[0-9A-F]+)?)?$")
#variableTable = {
# "varName": [pos, len]
# "counter": [5, 5]
#}
variableTable = {} #Where all variables are stored
labelTable = {} #Where all labels are stored
output = bytearray() #Program data buffer (up to 32768 usable bytes)
variables = bytearray() #Variable storage buffer (up to 5120 usable bytes)
currentLine = 0 #Current line of assembly
types = { #Valid variable types
"word","byte"
}
#Gets the addressing mode of the function by looking at a single operand
#Cannot find Implied
def getAddrMode(token):
if(token[0] == "#"):
return "IMM"
if(token[0] == "@"):
return "IND"
if(token[0] == "R"):
return "REG"
return "ABS"
#Helper function for adding large variables to the variable table
def addToVariables(value, size=0):
val = int(str(value),16)
if(size == 0):
size = int(math.ceil((val.bit_length())/8))
byteArr = val.to_bytes(size,byteorder="big")
variables.extend(byteArr)
#Variable Creation: <type> <name> = <value>
#All new types of variables can be put here
def createVar(tokens):
#If it doesn't fit that setup, return.
if(not tokens[2] == "="):
return
#byte counter = 5
if(tokens[0] == "byte"):
identifier = tokens[1]
value = tokens[3]
variableTable[identifier] = [len(variables),1]
addToVariables(value,1)
return
#word points = 50
if (tokens[0] == "word"):
identifier = tokens[1]
value = tokens[3]
variableTable[identifier] = [len(variables),2]
addToVariables(value,2)
else:
error("Unknown type {}".format(tokens[0]))
#Returns tokens with variables resolved
def resolveVariables(tokens):
#Go through the length of the opcode starting with the first operand and find variables
for i in range(1,len(tokens)):
addrMode = getAddrMode(tokens[i])
operator = ""
operand = ""
newoperand = ""
if(addrMode == "ABS"):
operand = tokens[i]
else:
operand = tokens[i][1:]
operator = tokens[i][0]
if(operand in variableTable):
if(variableTable[operand][1] > 2):
warning("Variable {} is larger than opcode can handle".format(operand))
#Rewrite the operand as the variable's absolute address (hex, no prefix);
#variables are placed at 0xC800 onwards in the output image.
tokens[i] = operator + format((variableTable[operand][0] + 0xC800),'x')
if(operand in labelTable):
#Labels are byte offsets into `output`; emit them in hex so that
#getOperand()'s int(...,16) parse reads them back correctly.
operand = format(labelTable[operand],'x')
tokens[i] = str(operator) + str(operand)
return tokens
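# Illustrative sketch (documentation-only; assumes _demo_create_var() ran):
# an operand naming a variable is rewritten to the variable's absolute
# address, 0xC800 + offset, rendered in hex.
def _demo_resolve_variables():
    offset = variableTable["counter"][0]
    toks = resolveVariables(["ADD", "#counter"])
    assert toks[1] == "#" + format(0xC800 + offset, "x")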
def handleLabel(tokens):
label = tokens[0][:-1]
labelTable[label] = len(output)
def updateVar(tokens):
print("Updating variable")
#maybe replace with regex if we're feeling brave
def assemble(tokens):
#print(tokens)
#If its variable creation:
if(tokens[0] in types and tokens[2] == "="):
createVar(tokens)
#If its variable assigning:
elif(tokens[0] in variableTable and tokens[1] == "="):
updateVar(tokens)
#If its a label
elif(tokens[0][-1] == ":"):
handleLabel(tokens)
#If its a comment
elif(tokens[0][0] == ";"):
pass
#Else its an opcode
else:
tokens = resolveVariables(tokens)
try:
eval(tokens[0])(tokens)
except NameError:
error("Unknown Input {}".format(tokens))
def getOperand(token):
addrMode = getAddrMode(token)
operand = ""
operator = ""
if(addrMode == "IMP" or addrMode == "ABS"):
operand = token
else:
operator = token[0]
operand = token[1:]
return addrMode,operator,int(operand,16)
#Handle a Standard 8 Bit opcode
#General function for opcodes with the format [opcode] [operand] where
#the operand is 8 bits.
def handleStd8bitOpcode(tokens):
print("Tokens in 8bit std: {}".format(tokens))
addrMode,_,operand = getOperand(tokens[1])
print(type(operand))
print("Operand in 8bit: {}".format(operand))
if(addrMode == "IMM" or addrMode == "REG"):
output.append(InstructionSet[addrMode + " " + tokens[0]])
output.append(operand)
else:
output.append(InstructionSet[addrMode + " " + tokens[0]])
output.append((operand >> 8) & 0xFF)
output.append(operand & 0xFF)
#Handle a Standard 16 Bit opcode
#General function for opcodes with the format [opcode] [operand] where
#the operand is 16 bits.
def handleStd16bitOpcode(tokens):
addrMode,_,operand = getOperand(tokens[1])
if(addrMode == "REG"):
output.append(InstructionSet[addrMode + " " + tokens[0]])
output.append((operand & 0xFF))
else:
output.append(InstructionSet[addrMode + " " + tokens[0]])
output.append((operand >> 8) & 0xFF)
output.append((operand & 0xFF))
# -= INSTRUCTION FUNCTIONS =- #
def NOP(tokens):
output.append(InstructionSet["IMP NOP"])
def HCF(tokens):
output.append(InstructionSet["IMP HCF"])
def PUSH(tokens):
output.append(InstructionSet["IMP PUSH"])
def POP(tokens):
output.append(InstructionSet["IMP POP"])
def SWP(tokens):
output.append(InstructionSet["REG SWP"])
_,_,operand = getOperand(tokens[1])
output.append(operand)
_,_,operand = getOperand(tokens[2])
output.append(operand)
def CALL(tokens):
handleStd16bitOpcode(tokens)
def RET(tokens):
output.append(InstructionSet["IMP RET"])
def ADD(tokens):
handleStd8bitOpcode(tokens)
def ADDX(tokens):
handleStd16bitOpcode(tokens)
def SUB(tokens):
handleStd8bitOpcode(tokens)
def SUBX(tokens):
handleStd16bitOpcode(tokens)
def MUL(tokens):
handleStd8bitOpcode(tokens)
def MULX(tokens):
handleStd16bitOpcode(tokens)
def DIV(tokens):
handleStd8bitOpcode(tokens)
def DIVX(tokens):
handleStd16bitOpcode(tokens)
def MOV(tokens):
addrMode1,_,operand1 = getOperand(tokens[1])
addrMode2,_,operand2 = getOperand(tokens[2])
output.append(InstructionSet[addrMode1 + " " + addrMode2 + " " + tokens[0]])
if(addrMode1 == "IMM" or addrMode1 == "REG"):
output.append(operand1)
else:
output.append((operand1 >> 8) & 0xFF)
output.append((operand1 & 0xFF))
if(addrMode2 == "IMM" or addrMode2 == "REG"):
output.append(operand2)
else:
output.append((operand2 >> 8) & 0xFF)
output.append((operand2 & 0xFF))
def MOVX(tokens):
addrMode1,_,operand1 = getOperand(tokens[1])
addrMode2,_,operand2 = getOperand(tokens[2])
output.append(InstructionSet[addrMode1 + " " + addrMode2 + " " + tokens[0]])
if(addrMode1 == "REG"):
output.append((operand1 & 0xFF))
else:
output.append((operand1 >> 8) & 0xFF)
output.append((operand1 & 0xFF))
if(addrMode2 == "REG"):
output.append((operand2 & 0xFF))
else:
output.append((operand2 >> 8) & 0xFF)
output.append((operand2 & 0xFF))
def SHL(tokens):
handleStd8bitOpcode(tokens)
def SHLX(tokens):
handleStd16bitOpcode(tokens)
def AND(tokens):
handleStd8bitOpcode(tokens)
def OR(tokens):
handleStd8bitOpcode(tokens)
def XOR(tokens):
handleStd8bitOpcode(tokens)
def SHR(tokens):
handleStd8bitOpcode(tokens)
def CMP(tokens):
handleStd8bitOpcode(tokens)
def CMPX(tokens):
handleStd16bitOpcode(tokens)
def SHRX(tokens):
handleStd16bitOpcode(tokens)
def JMP(tokens):
handleStd16bitOpcode(tokens)
def JZS(tokens):
handleStd16bitOpcode(tokens)
def JNZ(tokens):
handleStd16bitOpcode(tokens)
def JCS(tokens):
handleStd16bitOpcode(tokens)
def JNC(tokens):
handleStd16bitOpcode(tokens)
def JOS(tokens):
handleStd16bitOpcode(tokens)
def JNO(tokens):
handleStd16bitOpcode(tokens)
def JNS(tokens):
handleStd16bitOpcode(tokens)
def JNN(tokens):
handleStd16bitOpcode(tokens)
def JLS(tokens):
handleStd16bitOpcode(tokens)
def JNL(tokens):
handleStd16bitOpcode(tokens)
def JES(tokens):
handleStd16bitOpcode(tokens)
def CSF(tokens):
output.append(InstructionSet["IMP CSF"])
def CZF(tokens):
output.append(InstructionSet["IMP CZF"])
def SZF(tokens):
output.append(InstructionSet["IMP SZF"])
def CNF(tokens):
output.append(InstructionSet["IMP CNF"])
def SNF(tokens):
output.append(InstructionSet["IMP SNF"])
def CCF(tokens):
output.append(InstructionSet["IMP CCF"])
def SCF(tokens):
output.append(InstructionSet["IMP SCF"])
def COF(tokens):
output.append(InstructionSet["IMP COF"])
def SOF(tokens):
output.append(InstructionSet["IMP SOF"])
def CLF(tokens):
output.append(InstructionSet["IMP CLF"])
def SLF(tokens):
output.append(InstructionSet["IMP SLF"])
def CIF(tokens):
output.append(InstructionSet["IMP CIF"])
def SIF(tokens):
output.append(InstructionSet["IMP SIF"])
def CEF(tokens):
output.append(InstructionSet["IMP CEF"])
def INC(tokens):
handleStd8bitOpcode(tokens)
def INCX(tokens):
handleStd16bitOpcode(tokens)
def DEC(tokens):
handleStd8bitOpcode(tokens)
def DECX(tokens):
handleStd16bitOpcode(tokens)
#Writes an error to the console and stops execution
def error(msg):
print("[Error] line {} : {} ".format(currentLine,msg))
sys.exit()
#Writes a warning to the console
def warning(msg):
print("[Warning] line {} : {}".format(currentLine,msg))
def main():
global currentLine
if(len(sys.argv) < 2):
print("Usage: cosasm <input file> <output file>")
return -1
inputFile = open(sys.argv[1],'r')
instructions = list(inputFile)
inputFile.close()
print("Read " + str(len(instructions)) + " lines")
#-= Go through the file =-#
for i in range(0, len(instructions)):
tokens = instructions[i].split()
print(tokens)
assemble(tokens)
currentLine += 1
print("\n-= Output: =-")
for i in range(0, len(output)):
print(hex(output[i]),end=" ")
print("")
print('\n-= Var Table: =-')
for i in range(0 , len(variables)):
print(hex(variables[i]),end=" ")
print("")
currentSize = len(output)
#From where output ends, to 0xC800
for i in range (0, 0xC800-currentSize):
output.append(0x00)
#Put variables in
for i in range (0, len(variables)):
output.append(variables[i])
print("Loc: {} Value:{} ".format(hex(len(output)),hex(variables[i])))
#Finish the rest of the file
currentSize = len(output)
for i in range (0, 0xFFFF-currentSize):
output.append(0x00)
currentSize = len(output)
print("Current Size {} ".format(currentSize))
if(len(sys.argv) == 2):
outputFile = open('output.bin','w+b')
else:
outputFile = open(sys.argv[2],'w+b')
outputFile.write(output)
outputFile.close()
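# Illustrative sanity check of the image layout main() writes (a sketch,
# not invoked anywhere): program bytes first, zero padding up to 0xC800
# where the variable buffer starts, then padding to a final 0xFFFF bytes.
def _demo_image_layout():
    image = bytearray([0x10, 0x0A])                # e.g. one "ADD #A"
    image.extend(b"\x00" * (0xC800 - len(image)))  # pad to variable base
    image.extend(b"\x05")                          # a one-byte variable
    image.extend(b"\x00" * (0xFFFF - len(image)))  # pad out the image
    assert len(image) == 0xFFFF and image[0xC800] == 0x05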
if __name__ == "__main__":
main()
repo: microsoft/qlib | path: /tests/conftest.py | license: LicenseRef-scancode-generic-cla, MIT | language: Python | size: 247 bytes
"""Ignore RL tests on non-linux platforms."""
import os
import sys

collect_ignore = []
if sys.platform != "linux":
    for root, dirs, files in os.walk("rl"):
        for file in files:
            collect_ignore.append(os.path.join(root, file))
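# A terser equivalent would be pytest's glob-based hook; kept commented out
# as a sketch only, so the explicit os.walk version above stays in effect:
# collect_ignore_glob = ["rl/*"] if sys.platform != "linux" else []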
repo: mars-project/mars | path: /mars/lib/uhashring/ring.py | license: BSD-3-Clause, MIT, ISC, Apache-2.0, CC0-1.0, BSD-2-Clause, LicenseRef-scancode-unknown-license-reference | language: Python | size: 11,109 bytes
# -*- coding: utf-8 -*-
from bisect import bisect
from .ring_ketama import KetamaRing
from .ring_meta import MetaRing
class HashRing(object):
"""Implement a consistent hashing ring."""
def __init__(self, nodes=[], **kwargs):
"""Create a new HashRing given the implementation.
:param nodes: nodes used to create the continuum (see doc for format).
:param hash_fn: use this callable function to hash keys, can be set to
'ketama' to use the ketama compatible implementation.
:param vnodes: default number of vnodes per node.
:param weight_fn: use this function to calculate the node's weight.
"""
hash_fn = kwargs.get("hash_fn", None)
vnodes = kwargs.get("vnodes", None)
weight_fn = kwargs.get("weight_fn", None)
if hash_fn == "ketama":
if vnodes is None:
vnodes = 40
self.runtime = KetamaRing()
else:
if vnodes is None:
vnodes = 160
self.runtime = MetaRing(hash_fn)
self._default_vnodes = vnodes
self.hashi = self.runtime.hashi
if weight_fn and not hasattr(weight_fn, "__call__"):
raise TypeError("weight_fn should be a callable function")
self._weight_fn = weight_fn
if self._configure_nodes(nodes):
self.runtime._create_ring(self.runtime._nodes.items())
def _configure_nodes(self, nodes):
"""Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
"""
if isinstance(nodes, str):
nodes = [nodes]
elif not isinstance(nodes, (dict, list)):
raise ValueError(
f"nodes configuration should be a list or a dict, got {type(nodes)}"
)
conf_changed = False
for node in nodes:
conf = {
"hostname": node,
"instance": None,
"nodename": node,
"port": None,
"vnodes": self._default_vnodes,
"weight": 1,
}
current_conf = self.runtime._nodes.get(node, {})
nodename = node
# new node, trigger a ring update
if not current_conf:
conf_changed = True
# complex config
if isinstance(nodes, dict):
node_conf = nodes[node]
if isinstance(node_conf, int):
conf["weight"] = node_conf
elif isinstance(node_conf, dict):
for k, v in node_conf.items():
if k in conf:
conf[k] = v
# changing those config trigger a ring update
if k in ["nodename", "vnodes", "weight"]:
if current_conf.get(k) != v:
conf_changed = True
else:
raise ValueError(
"node configuration should be a dict or an int,"
f" got {type(node_conf)}"
)
if self._weight_fn:
conf["weight"] = self._weight_fn(**conf)
# changing the weight of a node trigger a ring update
if current_conf.get("weight") != conf["weight"]:
conf_changed = True
self.runtime._nodes[nodename] = conf
return conf_changed
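    @staticmethod
    def _demo_node_configuration():
        # Illustrative sketch of the accepted `nodes` forms (an int is a
        # weight, a dict is per-node configuration); documentation-only.
        simple = HashRing(nodes=["node1", "node2"])
        weighted = HashRing(nodes={"node1": 2,
                                   "node2": {"vnodes": 80, "weight": 1}})
        return simple, weighted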
def __delitem__(self, nodename):
"""Remove the given node.
:param nodename: the node name.
"""
self.runtime._remove_node(nodename)
remove_node = __delitem__
def __getitem__(self, key):
"""Returns the instance of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "instance")
get_node_instance = __getitem__
def __setitem__(self, nodename, conf={"weight": 1}):
"""Add the given node with its associated configuration.
:param nodename: the node name.
:param conf: the node configuration.
"""
if self._configure_nodes({nodename: conf}):
self.runtime._create_ring([(nodename, self._nodes[nodename])])
add_node = __setitem__
def _get_pos(self, key):
"""Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
"""
p = bisect(self.runtime._keys, self.hashi(key))
if p == len(self.runtime._keys):
return 0
else:
return p
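    @staticmethod
    def _demo_wraparound():
        # Toy illustration of the wrap-around rule above, with made-up
        # hash points; documentation-only.
        from bisect import bisect
        keys = [10, 20, 30]
        pos = bisect(keys, 35)  # past the last point on the ring...
        return 0 if pos == len(keys) else pos  # ...so wrap to index 0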
def _get(self, key, what):
"""Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
"""
if not self.runtime._ring:
return None
pos = self._get_pos(key)
if what == "pos":
return pos
nodename = self.runtime._ring[self.runtime._keys[pos]]
if what in ["hostname", "instance", "port", "weight"]:
return self.runtime._nodes[nodename][what]
elif what == "dict":
return self.runtime._nodes[nodename]
elif what == "nodename":
return nodename
elif what == "tuple":
return (self.runtime._keys[pos], nodename)
def get(self, key):
"""Returns the node object dict matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "dict")
def get_instances(self):
"""Returns a list of the instances of all the configured nodes."""
return [
c.get("instance") for c in self.runtime._nodes.values() if c.get("instance")
]
def get_key(self, key):
"""Alias of ketama hashi method, returns the hash of the given key.
This method is present for hash_ring compatibility.
:param key: the key to look for.
"""
return self.hashi(key)
def get_node(self, key):
"""Returns the node name of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "nodename")
def get_node_hostname(self, key):
"""Returns the hostname of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "hostname")
def get_node_port(self, key):
"""Returns the port of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "port")
def get_node_pos(self, key):
"""Returns the index position of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "pos")
def get_node_weight(self, key):
"""Returns the weight of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "weight")
def get_nodes(self):
"""Returns a list of the names of all the configured nodes."""
return self.runtime._nodes.keys()
def get_points(self):
"""Returns a ketama compatible list of (position, nodename) tuples."""
return [(k, self.runtime._ring[k]) for k in self.runtime._keys]
def get_server(self, key):
"""Returns a ketama compatible (position, nodename) tuple.
:param key: the key to look for.
"""
return self._get(key, "tuple")
def iterate_nodes(self, key, distinct=True):
"""hash_ring compatibility implementation.
Given a string key it returns the nodes as a generator that
can hold the key.
The generator iterates one time through the ring
starting at the correct position.
if `distinct` is set, then the nodes returned will be unique,
i.e. no virtual copies will be returned.
"""
if not self.runtime._ring:
yield None
else:
for node in self.range(key, unique=distinct):
yield node["nodename"]
def print_continuum(self):
"""Prints a ketama compatible continuum report."""
numpoints = len(self.runtime._keys)
if numpoints:
print(f"Numpoints in continuum: {numpoints}")
else:
print("Continuum empty")
for p in self.get_points():
point, node = p
print(f"{node} ({point})")
def range(self, key, size=None, unique=True):
"""Returns a generator of nodes' configuration available
in the continuum/ring.
:param key: the key to look for.
:param size: limit the list to at most this number of nodes.
:param unique: a node may only appear once in the list (default True).
"""
all_nodes = set()
if unique:
size = size or len(self.runtime._nodes)
else:
all_nodes = []
pos = self._get_pos(key)
for key in self.runtime._keys[pos:]:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
else:
for i, key in enumerate(self.runtime._keys):
if i < pos:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
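    @staticmethod
    def _demo_range():
        # Illustrative walk of the ring starting at a key's position,
        # limited to two distinct nodes; the node names are made up.
        ring = HashRing(nodes=["node1", "node2", "node3"])
        return [conf["nodename"] for conf in ring.range("some key", size=2)]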
def regenerate(self):
self.runtime._create_ring(self.runtime._nodes.items())
@property
def conf(self):
return self.runtime._nodes
nodes = conf
@property
def distribution(self):
return self.runtime._distribution
@property
def ring(self):
return self.runtime._ring
continuum = ring
@property
def size(self):
return len(self.runtime._ring)
@property
def _ring(self):
return self.runtime._ring
@property
def _nodes(self):
return self.runtime._nodes
@property
def _keys(self):
return self.runtime._keys
|
31fadcc61cb8b221b9737be5b9d9882524408c93
|
ecaaa7d9137225b05a59bc1127e8f5351a26fd3f
|
/tests/tests/test_app_settings.py
|
fb77ade55da29682d4b8562ac963be5fb1fd1ec5
|
[
"MIT"
] |
permissive
|
klis87/django-cloudinary-storage
|
ad787f08c3e77f5d4358419ad24e4f3c69bffdca
|
6f9ad3f61656df3c22ae1815d35c84c7fd4d264a
|
refs/heads/master
| 2022-10-25T18:54:57.856582
| 2022-09-01T11:14:08
| 2022-09-01T11:54:47
| 63,619,003
| 131
| 37
|
MIT
| 2022-09-01T11:54:47
| 2016-07-18T16:25:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
test_app_settings.py
|
import os
from django.test import SimpleTestCase, override_settings
from django.core.exceptions import ImproperlyConfigured
from cloudinary_storage.app_settings import set_credentials
from cloudinary_storage import app_settings
from .test_helpers import import_mock
mock = import_mock()
@mock.patch.dict(os.environ, {}, clear=True)
class SetCredentialsWithoutEnvVariablesTests(SimpleTestCase):
def assert_incomplete_settings_raise_error(self, settings):
with self.assertRaises(ImproperlyConfigured):
set_credentials(settings)
def test_missing_CLOUD_NAME_setting_raises_error(self):
self.assert_incomplete_settings_raise_error({'API_SECRET': 'secret', 'API_KEY': 'key'})
def test_missing_API_SECRET_setting_raises_error(self):
self.assert_incomplete_settings_raise_error({'CLOUD_NAME': 'name', 'API_KEY': 'key'})
def test_missing_API_KEY_setting_raises_error(self):
self.assert_incomplete_settings_raise_error({'CLOUD_NAME': 'name', 'API_SECRET': 'secret'})
@mock.patch('cloudinary_storage.app_settings.cloudinary.config')
def test_proper_configuration_correctly_sets_credentials(self, config_mock):
set_credentials({'CLOUD_NAME': 'name', 'API_SECRET': 'secret', 'API_KEY': 'key'})
config_mock.assert_called_once_with(cloud_name='name', api_secret='secret', api_key='key')
class SetCredentialsWithEnvVariablesTests(SimpleTestCase):
def assert_incomplete_settings_raise_error(self, settings={}):
with self.assertRaises(ImproperlyConfigured):
set_credentials(settings)
@mock.patch.dict(os.environ, {'CLOUDINARY_API_SECRET': 'secret', 'CLOUDINARY_API_KEY': 'key'}, clear=True)
def test_missing_CLOUD_NAME_variable_raises_error(self):
self.assert_incomplete_settings_raise_error()
@mock.patch.dict(os.environ, {'CLOUDINARY_CLOUD_NAME': 'name', 'CLOUDINARY_API_KEY': 'key'}, clear=True)
def test_missing_API_SECRET_variable_raises_error(self):
self.assert_incomplete_settings_raise_error()
@mock.patch.dict(os.environ, {'CLOUDINARY_CLOUD_NAME': 'name', 'CLOUDINARY_API_SECRET': 'secret'}, clear=True)
def test_missing_API_KEY_variable_raises_error(self):
self.assert_incomplete_settings_raise_error()
@mock.patch('cloudinary_storage.app_settings.cloudinary.config')
@mock.patch.dict(os.environ,
{
'CLOUDINARY_CLOUD_NAME': 'name',
'CLOUDINARY_API_SECRET': 'secret',
'CLOUDINARY_API_KEY': 'key'
},
clear=True)
def test_complete_set_of_env_variables_doesnt_raise_error(self, config_mock):
set_credentials({})
self.assertFalse(config_mock.called)
@mock.patch.dict(os.environ, {'CLOUDINARY_URL': 'my-url'}, clear=True)
@mock.patch('cloudinary_storage.app_settings.cloudinary.config')
def test_CLOUDINARY_URL_env_variable_doesnt_raise_error(self, config_mock):
set_credentials({})
self.assertFalse(config_mock.called)
class OverrideSettingsTests(SimpleTestCase):
def test_override_settings(self):
old_value = app_settings.MEDIA_TAG
with override_settings(CLOUDINARY_STORAGE={'MEDIA_TAG': 'test'}):
self.assertEqual(app_settings.MEDIA_TAG, 'test')
self.assertEqual(app_settings.MEDIA_TAG, old_value)
repo: numpy/numpy | path: /numpy/f2py/cfuncs.py | license: Zlib, BSD-3-Clause, MIT, Apache-2.0 | language: Python | size: 51,719 bytes
#!/usr/bin/env python3
"""
C declarations, CPP macros, and C functions for f2py2e.
Only required declarations/macros/functions will be used.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 11:42:34 $
Pearu Peterson
"""
import sys
import copy
from . import __version__
f2py_version = __version__.version
errmess = sys.stderr.write
##################### Definitions ##################
outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [],
'userincludes': [],
'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [],
'commonhooks': []}
needs = {}
includes0 = {'includes0': '/*need_includes0*/'}
includes = {'includes': '/*need_includes*/'}
userincludes = {'userincludes': '/*need_userincludes*/'}
typedefs = {'typedefs': '/*need_typedefs*/'}
typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'}
cppmacros = {'cppmacros': '/*need_cppmacros*/'}
cfuncs = {'cfuncs': '/*need_cfuncs*/'}
callbacks = {'callbacks': '/*need_callbacks*/'}
f90modhooks = {'f90modhooks': '/*need_f90modhooks*/',
'initf90modhooksstatic': '/*initf90modhooksstatic*/',
'initf90modhooksdynamic': '/*initf90modhooksdynamic*/',
}
commonhooks = {'commonhooks': '/*need_commonhooks*/',
'initcommonhooks': '/*need_initcommonhooks*/',
}
############ Includes ###################
includes0['math.h'] = '#include <math.h>'
includes0['string.h'] = '#include <string.h>'
includes0['setjmp.h'] = '#include <setjmp.h>'
includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
#include "arrayobject.h"'''
includes['npy_math.h'] = '#include "numpy/npy_math.h"'
includes['arrayobject.h'] = '#include "fortranobject.h"'
includes['stdarg.h'] = '#include <stdarg.h>'
############# Type definitions ###############
typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;'
typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;'
typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;'
typedefs['signed_char'] = 'typedef signed char signed_char;'
typedefs['long_long'] = """\
#if defined(NPY_OS_WIN32)
typedef __int64 long_long;
#else
typedef long long long_long;
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['unsigned_long_long'] = """\
#if defined(NPY_OS_WIN32)
typedef unsigned __int64 unsigned_long_long;
#else
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['long_double'] = """\
#ifndef _LONG_DOUBLE
typedef long double long_double;
#endif
"""
typedefs[
'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;'
typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;'
typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;'
typedefs['string'] = """typedef char * string;"""
typedefs['character'] = """typedef char character;"""
############### CPP macros ####################
cppmacros['CFUNCSMESS'] = """\
#ifdef DEBUGCFUNCS
#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
fprintf(stderr,\"\\n\");
#else
#define CFUNCSMESS(mess)
#define CFUNCSMESSPY(mess,obj)
#endif
"""
cppmacros['F_FUNC'] = """\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F
#else
#define F_FUNC(f,F) _##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F##_
#else
#define F_FUNC(f,F) _##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F
#else
#define F_FUNC(f,F) f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F##_
#else
#define F_FUNC(f,F) f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_FUNC_US(f,F) F_FUNC(f##_,F##_)
#else
#define F_FUNC_US(f,F) F_FUNC(f,F)
#endif
"""
cppmacros['F_WRAPPEDFUNC'] = """\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_)
#else
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F)
#endif
"""
cppmacros['F_MODFUNC'] = """\
#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f
#else
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f
#else
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) f ## .in. ## m
#else
#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _
#endif
#endif
/*
#if defined(UPPERCASE_FORTRAN)
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F)
#else
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f)
#endif
*/
#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f))
"""
cppmacros['SWAPUNSAFE'] = """\
#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(a) = ((size_t)(a) ^ (size_t)(b))
"""
cppmacros['SWAP'] = """\
#define SWAP(a,b,t) {\\
t *c;\\
c = a;\\
a = b;\\
b = c;}
"""
# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) &
# NPY_ARRAY_C_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR'] = """\
#define PRINTPYOBJERR(obj)\\
fprintf(stderr,\"#modulename#.error is related to \");\\
PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
fprintf(stderr,\"\\n\");
"""
cppmacros['MINMAX'] = """\
#ifndef max
#define max(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef min
#define min(a,b) ((a < b) ? (a) : (b))
#endif
#ifndef MAX
#define MAX(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef MIN
#define MIN(a,b) ((a < b) ? (a) : (b))
#endif
"""
cppmacros['len..'] = """\
/* See fortranobject.h for definitions. The macros here are provided for BC. */
#define rank f2py_rank
#define shape f2py_shape
#define fshape f2py_shape
#define len f2py_len
#define flen f2py_flen
#define slen f2py_slen
#define size f2py_size
"""
cppmacros[
'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyLong_FromLong(v))'
cppmacros[
'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyLong_FromLong(v))'
needs['pyobj_from_int1'] = ['signed_char']
cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyLong_FromLong(v))'
cppmacros[
'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))'
needs['pyobj_from_long_long1'] = ['long_long']
cppmacros['pyobj_from_long_long1'] = """\
#ifdef HAVE_LONG_LONG
#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v))
#else
#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long.
#define pyobj_from_long_long1(v) (PyLong_FromLong(v))
#endif
"""
needs['pyobj_from_long_double1'] = ['long_double']
cppmacros[
'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))'
cppmacros[
'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))'
cppmacros[
'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))'
needs['pyobj_from_complex_long_double1'] = ['complex_long_double']
cppmacros[
'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_double1'] = ['complex_double']
cppmacros[
'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_float1'] = ['complex_float']
cppmacros[
'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_string1'] = ['string']
cppmacros[
'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))'
needs['pyobj_from_string1size'] = ['string']
cppmacros[
'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))'
needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
cppmacros['TRYPYARRAYTEMPLATE'] = """\
/* New SciPy */
#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break;
#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;
#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break;
#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\
switch (PyArray_TYPE(arr)) {\\
case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\
case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\
case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\
case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\
case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\
case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\
case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\
case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\
case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\
case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=*v; break;\\
case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\
case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\
case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\
case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\
case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\
case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\
default: return -2;\\
};\\
return 1
"""
needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\
#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;
#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (PyArray_DESCR(arr)->type==typecode) {\\
*(ctype *)(PyArray_DATA(arr))=(*v).r;\\
*(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\
return 1;\\
}\\
switch (PyArray_TYPE(arr)) {\\
case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\
*(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\
break;\\
case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\
*(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\
break;\\
case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\
case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\
*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\
break;\\
case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\
default: return -2;\\
};\\
return -1;
"""
# cppmacros['NUMFROMARROBJ']="""\
# define NUMFROMARROBJ(typenum,ctype) \\
# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
# if (arr) {\\
# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
# goto capi_fail;\\
# } else {\\
# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\
# }\\
# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
# return 1;\\
# }
# """
# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
# cppmacros['CNUMFROMARROBJ']="""\
# define CNUMFROMARROBJ(typenum,ctype) \\
# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
# if (arr) {\\
# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
# goto capi_fail;\\
# } else {\\
# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
# }\\
# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
# return 1;\\
# }
# """
needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR']
cppmacros['GETSTRFROMPYTUPLE'] = """\
#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
if (rv_cb_str == NULL)\\
goto capi_fail;\\
if (PyBytes_Check(rv_cb_str)) {\\
str[len-1]='\\0';\\
STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\
} else {\\
PRINTPYOBJERR(rv_cb_str);\\
PyErr_SetString(#modulename#_error,\"string object expected\");\\
goto capi_fail;\\
}\\
}
"""
cppmacros['GETSCALARFROMPYTUPLE'] = """\
#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
goto capi_fail;\\
}
"""
cppmacros['FAILNULL'] = """\\
#define FAILNULL(p) do { \\
if ((p) == NULL) { \\
PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\
goto capi_fail; \\
} \\
} while (0)
"""
needs['MEMCOPY'] = ['string.h', 'FAILNULL']
cppmacros['MEMCOPY'] = """\
#define MEMCOPY(to,from,n)\\
do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0)
"""
cppmacros['STRINGMALLOC'] = """\
#define STRINGMALLOC(str,len)\\
if ((str = (string)malloc(len+1)) == NULL) {\\
PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
goto capi_fail;\\
} else {\\
(str)[len] = '\\0';\\
}
"""
cppmacros['STRINGFREE'] = """\
#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
"""
needs['STRINGPADN'] = ['string.h']
cppmacros['STRINGPADN'] = """\
/*
STRINGPADN replaces null values with padding values from the right.
`to` must have size of at least N bytes.
If the `to[N-1]` has null value, then replace it and all the
preceding, nulls with the given padding.
STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation.
*/
#define STRINGPADN(to, N, NULLVALUE, PADDING) \\
do { \\
int _m = (N); \\
char *_to = (to); \\
for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) { \\
_to[_m] = PADDING; \\
} \\
} while (0)
"""
needs['STRINGCOPYN'] = ['string.h', 'FAILNULL']
cppmacros['STRINGCOPYN'] = """\
/*
STRINGCOPYN copies N bytes.
`to` and `from` buffers must have sizes of at least N bytes.
*/
#define STRINGCOPYN(to,from,N) \\
do { \\
int _m = (N); \\
char *_to = (to); \\
char *_from = (from); \\
FAILNULL(_to); FAILNULL(_from); \\
(void)strncpy(_to, _from, _m); \\
} while (0)
"""
needs['STRINGCOPY'] = ['string.h', 'FAILNULL']
cppmacros['STRINGCOPY'] = """\
#define STRINGCOPY(to,from)\\
do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0)
"""
cppmacros['CHECKGENERIC'] = """\
#define CHECKGENERIC(check,tcheck,name) \\
if (!(check)) {\\
PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
/*goto capi_fail;*/\\
} else """
cppmacros['CHECKARRAY'] = """\
#define CHECKARRAY(check,tcheck,name) \\
if (!(check)) {\\
PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
/*goto capi_fail;*/\\
} else """
cppmacros['CHECKSTRING'] = """\
#define CHECKSTRING(check,tcheck,name,show,var)\\
if (!(check)) {\\
char errstring[256];\\
sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
PyErr_SetString(#modulename#_error, errstring);\\
/*goto capi_fail;*/\\
} else """
cppmacros['CHECKSCALAR'] = """\
#define CHECKSCALAR(check,tcheck,name,show,var)\\
if (!(check)) {\\
char errstring[256];\\
sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
PyErr_SetString(#modulename#_error,errstring);\\
/*goto capi_fail;*/\\
} else """
# cppmacros['CHECKDIMS']="""\
# define CHECKDIMS(dims,rank) \\
# for (int i=0;i<(rank);i++)\\
# if (dims[i]<0) {\\
# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
# goto capi_fail;\\
# }
# """
cppmacros[
'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
cppmacros['OLDPYNUM'] = """\
#ifdef OLDPYNUM
#error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html
#endif
"""
cppmacros["F2PY_THREAD_LOCAL_DECL"] = """\
#ifndef F2PY_THREAD_LOCAL_DECL
#if defined(_MSC_VER)
#define F2PY_THREAD_LOCAL_DECL __declspec(thread)
#elif defined(NPY_OS_MINGW)
#define F2PY_THREAD_LOCAL_DECL __thread
#elif defined(__STDC_VERSION__) \\
&& (__STDC_VERSION__ >= 201112L) \\
&& !defined(__STDC_NO_THREADS__) \\
&& (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) \\
&& !defined(NPY_OS_OPENBSD) && !defined(NPY_OS_HAIKU)
/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12,
see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html,
so `!defined(__STDC_NO_THREADS__)` may give false positive for the existence
of `threads.h` when using an older release of glibc 2.12
See gh-19437 for details on OpenBSD */
#include <threads.h>
#define F2PY_THREAD_LOCAL_DECL thread_local
#elif defined(__GNUC__) \\
&& (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4)))
#define F2PY_THREAD_LOCAL_DECL __thread
#endif
#endif
"""
################# C functions ###############
cfuncs['calcarrindex'] = """\
static int calcarrindex(int *i,PyArrayObject *arr) {
int k,ii = i[0];
for (k=1; k < PyArray_NDIM(arr); k++)
ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
return ii;
}"""
cfuncs['calcarrindextr'] = """\
static int calcarrindextr(int *i,PyArrayObject *arr) {
int k,ii = i[PyArray_NDIM(arr)-1];
for (k=1; k < PyArray_NDIM(arr); k++)
ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
return ii;
}"""
cfuncs['forcomb'] = """\
static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
static int initforcomb(npy_intp *dims,int nd,int tr) {
int k;
if (dims==NULL) return 0;
if (nd<0) return 0;
forcombcache.nd = nd;
forcombcache.d = dims;
forcombcache.tr = tr;
if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
for (k=1;k<nd;k++) {
forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
}
forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
return 1;
}
static int *nextforcomb(void) {
int j,*i,*i_tr,k;
int nd=forcombcache.nd;
if ((i=forcombcache.i) == NULL) return NULL;
if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
if (forcombcache.d == NULL) return NULL;
i[0]++;
if (i[0]==forcombcache.d[0]) {
j=1;
while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
if (j==nd) {
free(i);
free(i_tr);
return NULL;
}
for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
i[j]++;
i_tr[nd-j-1]++;
} else
i_tr[nd-1]++;
if (forcombcache.tr) return i_tr;
return i;
}"""
needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string']
cfuncs['try_pyarr_from_string'] = """\
/*
try_pyarr_from_string copies str[:len(obj)] to the data of an `ndarray`.
If obj is an `ndarray`, it is assumed to be contiguous.
If the specified len==-1, str must be null-terminated.
*/
static int try_pyarr_from_string(PyObject *obj,
const string str, const int len) {
#ifdef DEBUGCFUNCS
fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n",
(char*)str,len, obj);
#endif
if (PyArray_Check(obj)) {
PyArrayObject *arr = (PyArrayObject *)obj;
assert(ISCONTIGUOUS(arr));
string buf = PyArray_DATA(arr);
npy_intp n = len;
if (n == -1) {
/* Assuming null-terminated str. */
n = strlen(str);
}
if (n > PyArray_NBYTES(arr)) {
n = PyArray_NBYTES(arr);
}
STRINGCOPYN(buf, str, n);
return 1;
}
capi_fail:
PRINTPYOBJERR(obj);
PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\");
return 0;
}
"""
needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN']
cfuncs['string_from_pyobj'] = """\
/*
Create a new string buffer `str` of at most length `len` from a
Python string-like object `obj`.
The string buffer has given size (len) or the size of inistr when len==-1.
The string buffer is padded with blanks: in Fortran, trailing blanks
are insignificant contrary to C nulls.
*/
static int
string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj,
const char *errmess)
{
PyObject *tmp = NULL;
string buf = NULL;
npy_intp n = -1;
#ifdef DEBUGCFUNCS
fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",
(char*)str, *len, (char *)inistr, obj);
#endif
if (obj == Py_None) {
n = strlen(inistr);
buf = inistr;
}
else if (PyArray_Check(obj)) {
PyArrayObject *arr = (PyArrayObject *)obj;
if (!ISCONTIGUOUS(arr)) {
PyErr_SetString(PyExc_ValueError,
\"array object is non-contiguous.\");
goto capi_fail;
}
n = PyArray_NBYTES(arr);
buf = PyArray_DATA(arr);
n = strnlen(buf, n);
}
else {
if (PyBytes_Check(obj)) {
tmp = obj;
Py_INCREF(tmp);
}
else if (PyUnicode_Check(obj)) {
tmp = PyUnicode_AsASCIIString(obj);
}
else {
PyObject *tmp2;
tmp2 = PyObject_Str(obj);
if (tmp2) {
tmp = PyUnicode_AsASCIIString(tmp2);
Py_DECREF(tmp2);
}
else {
tmp = NULL;
}
}
if (tmp == NULL) goto capi_fail;
n = PyBytes_GET_SIZE(tmp);
buf = PyBytes_AS_STRING(tmp);
}
if (*len == -1) {
/* TODO: change the type of `len` so that we can remove this */
if (n > NPY_MAX_INT) {
PyErr_SetString(PyExc_OverflowError,
"object too large for a 32-bit int");
goto capi_fail;
}
*len = n;
}
else if (*len < n) {
/* discard the last (len-n) bytes of input buf */
n = *len;
}
if (n < 0 || *len < 0 || buf == NULL) {
goto capi_fail;
}
STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1)
if (n < *len) {
/*
Pad fixed-width string with nulls. The caller will replace
nulls with blanks when the corresponding argument is not
intent(c).
*/
memset(*str + n, '\\0', *len - n);
}
STRINGCOPYN(*str, buf, n);
Py_XDECREF(tmp);
return 1;
capi_fail:
Py_XDECREF(tmp);
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
cfuncs['character_from_pyobj'] = """\
static int
character_from_pyobj(character* v, PyObject *obj, const char *errmess) {
if (PyBytes_Check(obj)) {
/* empty bytes has trailing null, so dereferencing is always safe */
*v = PyBytes_AS_STRING(obj)[0];
return 1;
} else if (PyUnicode_Check(obj)) {
PyObject* tmp = PyUnicode_AsASCIIString(obj);
if (tmp != NULL) {
*v = PyBytes_AS_STRING(tmp)[0];
Py_DECREF(tmp);
return 1;
}
} else if (PyArray_Check(obj)) {
PyArrayObject* arr = (PyArrayObject*)obj;
if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) {
*v = PyArray_BYTES(arr)[0];
return 1;
} else if (F2PY_IS_UNICODE_ARRAY(arr)) {
// TODO: update when numpy will support 1-byte and
// 2-byte unicode dtypes
PyObject* tmp = PyUnicode_FromKindAndData(
PyUnicode_4BYTE_KIND,
PyArray_BYTES(arr),
(PyArray_NBYTES(arr)>0?1:0));
if (tmp != NULL) {
if (character_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
}
} else if (PySequence_Check(obj)) {
PyObject* tmp = PySequence_GetItem(obj,0);
if (tmp != NULL) {
if (character_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
}
{
/* TODO: This error (and most other) error handling needs cleaning. */
char mess[F2PY_MESSAGE_BUFFER_SIZE];
strcpy(mess, errmess);
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = PyExc_TypeError;
Py_INCREF(err);
}
else {
Py_INCREF(err);
PyErr_Clear();
}
sprintf(mess + strlen(mess),
" -- expected str|bytes|sequence-of-str-or-bytes, got ");
f2py_describe(obj, mess + strlen(mess));
PyErr_SetString(err, mess);
Py_DECREF(err);
}
return 0;
}
"""
needs['char_from_pyobj'] = ['int_from_pyobj']
cfuncs['char_from_pyobj'] = """\
static int
char_from_pyobj(char* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (char)i;
return 1;
}
return 0;
}
"""
needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char']
cfuncs['signed_char_from_pyobj'] = """\
static int
signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (signed_char)i;
return 1;
}
return 0;
}
"""
needs['short_from_pyobj'] = ['int_from_pyobj']
cfuncs['short_from_pyobj'] = """\
static int
short_from_pyobj(short* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (short)i;
return 1;
}
return 0;
}
"""
cfuncs['int_from_pyobj'] = """\
static int
int_from_pyobj(int* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = Npy__PyLong_AsInt(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = Npy__PyLong_AsInt(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (int_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
cfuncs['long_from_pyobj'] = """\
static int
long_from_pyobj(long* v, PyObject *obj, const char *errmess) {
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = PyLong_AsLong(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyLong_AsLong(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
needs['long_long_from_pyobj'] = ['long_long']
cfuncs['long_long_from_pyobj'] = """\
static int
long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = PyLong_AsLongLong(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyLong_AsLongLong(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (long_long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err,errmess);
}
return 0;
}
"""
needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double']
cfuncs['long_double_from_pyobj'] = """\
static int
long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess)
{
double d=0;
if (PyArray_CheckScalar(obj)){
if PyArray_IsScalar(obj, LongDouble) {
PyArray_ScalarAsCtype(obj, v);
return 1;
}
else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) {
(*v) = *((npy_longdouble *)PyArray_DATA(obj));
return 1;
}
}
if (double_from_pyobj(&d, obj, errmess)) {
*v = (long_double)d;
return 1;
}
return 0;
}
"""
cfuncs['double_from_pyobj'] = """\
static int
double_from_pyobj(double* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyFloat_Check(obj)) {
*v = PyFloat_AsDouble(obj);
return !(*v == -1.0 && PyErr_Occurred());
}
tmp = PyNumber_Float(obj);
if (tmp) {
*v = PyFloat_AsDouble(tmp);
Py_DECREF(tmp);
return !(*v == -1.0 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err==NULL) err = #modulename#_error;
PyErr_SetString(err,errmess);
}
return 0;
}
"""
needs['float_from_pyobj'] = ['double_from_pyobj']
cfuncs['float_from_pyobj'] = """\
static int
float_from_pyobj(float* v, PyObject *obj, const char *errmess)
{
double d=0.0;
if (double_from_pyobj(&d,obj,errmess)) {
*v = (float)d;
return 1;
}
return 0;
}
"""
needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double',
'complex_double_from_pyobj', 'npy_math.h']
cfuncs['complex_long_double_from_pyobj'] = """\
static int
complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess)
{
complex_double cd = {0.0,0.0};
if (PyArray_CheckScalar(obj)){
if PyArray_IsScalar(obj, CLongDouble) {
PyArray_ScalarAsCtype(obj, v);
return 1;
}
else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
(*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj))));
(*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj))));
return 1;
}
}
if (complex_double_from_pyobj(&cd,obj,errmess)) {
(*v).r = (long_double)cd.r;
(*v).i = (long_double)cd.i;
return 1;
}
return 0;
}
"""
needs['complex_double_from_pyobj'] = ['complex_double', 'npy_math.h']
cfuncs['complex_double_from_pyobj'] = """\
static int
complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) {
Py_complex c;
if (PyComplex_Check(obj)) {
c = PyComplex_AsCComplex(obj);
(*v).r = c.real;
(*v).i = c.imag;
return 1;
}
if (PyArray_IsScalar(obj, ComplexFloating)) {
if (PyArray_IsScalar(obj, CFloat)) {
npy_cfloat new;
PyArray_ScalarAsCtype(obj, &new);
(*v).r = (double)npy_crealf(new);
(*v).i = (double)npy_cimagf(new);
}
else if (PyArray_IsScalar(obj, CLongDouble)) {
npy_clongdouble new;
PyArray_ScalarAsCtype(obj, &new);
(*v).r = (double)npy_creall(new);
(*v).i = (double)npy_cimagl(new);
}
else { /* if (PyArray_IsScalar(obj, CDouble)) */
PyArray_ScalarAsCtype(obj, v);
}
return 1;
}
if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
PyArrayObject *arr;
if (PyArray_Check(obj)) {
arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE);
}
else {
arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
}
if (arr == NULL) {
return 0;
}
(*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr))));
(*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr))));
Py_DECREF(arr);
return 1;
}
/* Python does not provide PyNumber_Complex function :-( */
(*v).i = 0.0;
if (PyFloat_Check(obj)) {
(*v).r = PyFloat_AsDouble(obj);
return !((*v).r == -1.0 && PyErr_Occurred());
}
if (PyLong_Check(obj)) {
(*v).r = PyLong_AsDouble(obj);
return !((*v).r == -1.0 && PyErr_Occurred());
}
if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) {
PyObject *tmp = PySequence_GetItem(obj,0);
if (tmp) {
if (complex_double_from_pyobj(v,tmp,errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
}
{
PyObject* err = PyErr_Occurred();
if (err==NULL)
err = PyExc_TypeError;
PyErr_SetString(err,errmess);
}
return 0;
}
"""
needs['complex_float_from_pyobj'] = [
'complex_float', 'complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj'] = """\
static int
complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess)
{
complex_double cd={0.0,0.0};
if (complex_double_from_pyobj(&cd,obj,errmess)) {
(*v).r = (float)cd.r;
(*v).i = (float)cd.i;
return 1;
}
return 0;
}
"""
cfuncs['try_pyarr_from_character'] = """\
static int try_pyarr_from_character(PyObject* obj, character* v) {
PyArrayObject *arr = (PyArrayObject*)obj;
if (!obj) return -2;
if (PyArray_Check(obj)) {
if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) {
*(character *)(PyArray_DATA(arr)) = *v;
return 1;
}
}
{
char mess[F2PY_MESSAGE_BUFFER_SIZE];
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = PyExc_ValueError;
strcpy(mess, "try_pyarr_from_character failed"
" -- expected bytes array-scalar|array, got ");
f2py_describe(obj, mess + strlen(mess));
PyErr_SetString(err, mess);
}
}
return 0;
}
"""
needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
needs['try_pyarr_from_unsigned_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char']
cfuncs[
'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char']
cfuncs[
'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
needs['try_pyarr_from_long_long'] = [
'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long']
cfuncs[
'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
needs['try_pyarr_from_complex_float'] = [
'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float']
cfuncs[
'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
needs['try_pyarr_from_complex_double'] = [
'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double']
cfuncs[
'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
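# Illustrative sketch (not part of f2py): the `try_pyarr_from_*` one-liners
# above differ only in the C type and the type-code character handed to
# TRYPYARRAYTEMPLATE, so they could equally be emitted from a table. A
# hypothetical generator (the table below is an assumption for illustration):
_TRY_PYARR_DEMO_TABLE = {
    'char': 'c', 'signed_char': '1', 'short': 's', 'int': 'i',
    'long': 'l', 'long_long': 'L', 'float': 'f', 'double': 'd',
}
def _emit_try_pyarr_demo():
    """Yield (name, C source) pairs mirroring the hand-written entries above."""
    for ctype, code in _TRY_PYARR_DEMO_TABLE.items():
        name = 'try_pyarr_from_%s' % ctype
        yield name, ('static int %s(PyObject* obj,%s* v) {\n'
                     '    TRYPYARRAYTEMPLATE(%s,\'%s\');\n}\n'
                     % (name, ctype, ctype, code))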
needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX']
# Create the list of arguments used when calling back into Python.
cfuncs['create_cb_arglist'] = """\
static int
create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs,
const int nofoptargs, int *nofargs, PyTupleObject **args,
const char *errmess)
{
PyObject *tmp = NULL;
PyObject *tmp_fun = NULL;
Py_ssize_t tot, opt, ext, siz, i, di = 0;
CFUNCSMESS(\"create_cb_arglist\\n\");
tot=opt=ext=siz=0;
/* Get the total number of arguments */
if (PyFunction_Check(fun)) {
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
else {
di = 1;
if (PyObject_HasAttrString(fun,\"im_func\")) {
tmp_fun = PyObject_GetAttrString(fun,\"im_func\");
}
else if (PyObject_HasAttrString(fun,\"__call__\")) {
tmp = PyObject_GetAttrString(fun,\"__call__\");
if (PyObject_HasAttrString(tmp,\"im_func\"))
tmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
else {
tmp_fun = fun; /* built-in function */
Py_INCREF(tmp_fun);
tot = maxnofargs;
if (PyCFunction_Check(fun)) {
/* In case the function has a co_argcount (like on PyPy) */
di = 0;
}
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
}
Py_XDECREF(tmp);
}
else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
tot = maxnofargs;
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
else if (F2PyCapsule_Check(fun)) {
tot = maxnofargs;
if (xa != NULL)
ext = PyTuple_Size((PyObject *)xa);
if(ext>0) {
fprintf(stderr,\"extra arguments tuple cannot be used with PyCapsule call-back\\n\");
goto capi_fail;
}
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
}
if (tmp_fun == NULL) {
fprintf(stderr,
\"Call-back argument must be function|instance|instance.__call__|f2py-function \"
\"but got %s.\\n\",
((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name));
goto capi_fail;
}
if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
Py_DECREF(tmp);
if (tmp_argcount == NULL) {
goto capi_fail;
}
tot = PyLong_AsSsize_t(tmp_argcount) - di;
Py_DECREF(tmp_argcount);
}
}
/* Get the number of optional arguments */
if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
opt = PyTuple_Size(tmp);
Py_XDECREF(tmp);
}
/* Get the number of extra arguments */
if (xa != NULL)
ext = PyTuple_Size((PyObject *)xa);
/* Calculate the size of call-backs argument list */
siz = MIN(maxnofargs+ext,tot);
*nofargs = MAX(0,siz-ext);
#ifdef DEBUGCFUNCS
fprintf(stderr,
\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\"
\"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\",
maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs);
#endif
if (siz < tot-opt) {
fprintf(stderr,
\"create_cb_arglist: Failed to build argument list \"
\"(siz) with enough arguments (tot-opt) required by \"
\"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\",
siz, tot, opt);
goto capi_fail;
}
/* Initialize argument list */
*args = (PyTupleObject *)PyTuple_New(siz);
for (i=0;i<*nofargs;i++) {
Py_INCREF(Py_None);
PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
}
if (xa != NULL)
for (i=(*nofargs);i<siz;i++) {
tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
Py_INCREF(tmp);
PyTuple_SET_ITEM(*args,i,tmp);
}
CFUNCSMESS(\"create_cb_arglist-end\\n\");
Py_DECREF(tmp_fun);
return 1;
capi_fail:
if (PyErr_Occurred() == NULL)
PyErr_SetString(#modulename#_error, errmess);
Py_XDECREF(tmp_fun);
return 0;
}
"""
def buildcfuncs():
from .capi_maps import c2capi_map
for k in c2capi_map.keys():
m = 'pyarr_from_p_%s1' % k
cppmacros[
m] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (m, c2capi_map[k])
k = 'string'
m = 'pyarr_from_p_%s1' % k
# NPY_CHAR compatibility, NPY_STRING with itemsize 1
cppmacros[
m] = '#define %s(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' % (m)
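# Illustrative note (assumption: c2capi_map contains e.g. 'double' ->
# 'NPY_DOUBLE'): for that entry, buildcfuncs() above would register a macro
# wrapping a C scalar pointer in a 0-d array without copying it. A
# hypothetical helper showing the generated text for one entry:
def _pyarr_macro_demo(ctype='double', capi='NPY_DOUBLE'):
    """Return (macro_name, macro_text) as buildcfuncs() would produce."""
    m = 'pyarr_from_p_%s1' % ctype
    return m, ('#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))'
               % (m, capi))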
############ Auxiliary functions for sorting needs ###################
def append_needs(need, flag=1):
# This function modifies the contents of the global `outneeds` dict.
if isinstance(need, list):
for n in need:
append_needs(n, flag)
elif isinstance(need, str):
if not need:
return
if need in includes0:
n = 'includes0'
elif need in includes:
n = 'includes'
elif need in typedefs:
n = 'typedefs'
elif need in typedefs_generated:
n = 'typedefs_generated'
elif need in cppmacros:
n = 'cppmacros'
elif need in cfuncs:
n = 'cfuncs'
elif need in callbacks:
n = 'callbacks'
elif need in f90modhooks:
n = 'f90modhooks'
elif need in commonhooks:
n = 'commonhooks'
else:
errmess('append_needs: unknown need %s\n' % (repr(need)))
return
if need in outneeds[n]:
return
if flag:
tmp = {}
if need in needs:
for nn in needs[need]:
t = append_needs(nn, 0)
if isinstance(t, dict):
for nnn in t.keys():
if nnn in tmp:
tmp[nnn] = tmp[nnn] + t[nnn]
else:
tmp[nnn] = t[nnn]
for nn in tmp.keys():
for nnn in tmp[nn]:
if nnn not in outneeds[nn]:
outneeds[nn] = [nnn] + outneeds[nn]
outneeds[n].append(need)
else:
tmp = {}
if need in needs:
for nn in needs[need]:
t = append_needs(nn, flag)
if isinstance(t, dict):
for nnn in t.keys():
if nnn in tmp:
tmp[nnn] = t[nnn] + tmp[nnn]
else:
tmp[nnn] = t[nnn]
if n not in tmp:
tmp[n] = []
tmp[n].append(need)
return tmp
else:
errmess('append_needs: expected list or string but got :%s\n' %
(repr(need)))
def get_needs():
# This function modifies the contents of the global `outneeds` dict.
res = {}
for n in outneeds.keys():
out = []
saveout = copy.copy(outneeds[n])
while len(outneeds[n]) > 0:
if outneeds[n][0] not in needs:
out.append(outneeds[n][0])
del outneeds[n][0]
else:
flag = 0
for k in outneeds[n][1:]:
if k in needs[outneeds[n][0]]:
flag = 1
break
if flag:
outneeds[n] = outneeds[n][1:] + [outneeds[n][0]]
else:
out.append(outneeds[n][0])
del outneeds[n][0]
if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \
and outneeds[n] != []:
print(n, saveout)
errmess(
'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
out = out + saveout
break
saveout = copy.copy(outneeds[n])
if out == []:
out = [n]
res[n] = out
return res
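# Illustrative sketch (not part of f2py): get_needs() above keeps rotating an
# item to the back while any of its dependencies are still pending -- a
# bubble-style topological sort. A compact equivalent on a toy dependency
# dict (the real code additionally detects circular dependencies):
def _toposort_demo(items, deps):
    """Order `items` so that each item's dependencies come before it."""
    pending, out = list(items), []
    while pending:
        head, rest = pending[0], pending[1:]
        if any(k in deps.get(head, ()) for k in rest):
            pending = rest + [head]  # `head` still needs a later item: defer it
        else:
            out.append(head)
            pending = rest
    return out
# _toposort_demo(['a', 'b'], {'a': ['b']}) == ['b', 'a']   ('a' needs 'b')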
|
4170e6b53bce96976f648ff8ef97202d03707ded
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/notebook/services/config/manager.py
|
59f267dd9615573a95e5855ce06aadf5fa609032
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,056
|
py
|
manager.py
|
"""Manager to read and modify frontend config data in JSON files.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os.path
from notebook.config_manager import BaseJSONConfigManager, recursive_update
from jupyter_core.paths import jupyter_config_dir, jupyter_config_path
from traitlets import Unicode, Instance, List, observe, default
from traitlets.config import LoggingConfigurable
class ConfigManager(LoggingConfigurable):
"""Config Manager used for storing notebook frontend config"""
# Public API
def get(self, section_name):
"""Get the config from all config sections."""
config = {}
        # Step through back to front so that the front of the list takes top priority.
for p in self.read_config_path[::-1]:
cm = BaseJSONConfigManager(config_dir=p)
recursive_update(config, cm.get(section_name))
return config
def set(self, section_name, data):
"""Set the config only to the user's config."""
return self.write_config_manager.set(section_name, data)
def update(self, section_name, new_data):
"""Update the config only to the user's config."""
return self.write_config_manager.update(section_name, new_data)
# Private API
read_config_path = List(Unicode())
@default('read_config_path')
def _default_read_config_path(self):
return [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
write_config_dir = Unicode()
@default('write_config_dir')
def _default_write_config_dir(self):
return os.path.join(jupyter_config_dir(), 'nbconfig')
write_config_manager = Instance(BaseJSONConfigManager)
@default('write_config_manager')
def _default_write_config_manager(self):
return BaseJSONConfigManager(config_dir=self.write_config_dir)
@observe('write_config_dir')
def _update_write_config_dir(self, change):
self.write_config_manager = BaseJSONConfigManager(config_dir=self.write_config_dir)
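# Hypothetical usage sketch (section name and key are illustrative): reads
# merge every `nbconfig` directory back-to-front, so the first entry of
# `read_config_path` wins, while writes only ever touch the user's config dir.
if __name__ == "__main__":
    cm = ConfigManager()
    merged = cm.get("notebook")                    # merged view across all dirs
    cm.update("notebook", {"example_key": True})   # persisted to user dir only
    print(merged)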
|
c282cf290f9bf42b73587b09a519f6dbf610368d
|
360328d098a74581d0822fba489dd15e0d4e7ab3
|
/src/richie/plugins/simple_picture/helpers.py
|
669919c3bece4558a314d76de0090c436cc97a7e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
openfun/richie
|
0cef545486267bfb40e75e5fb2ce2a74f85a53ff
|
f2d46fc46b271eb3b4d565039a29c15ba15f027c
|
refs/heads/master
| 2023-08-31T23:51:37.714179
| 2023-08-29T15:25:04
| 2023-08-29T15:48:39
| 111,388,461
| 238
| 96
|
MIT
| 2023-09-13T12:48:53
| 2017-11-20T09:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,894
|
py
|
helpers.py
|
"""SimplePicture plugin for DjangoCMS."""
from .defaults import SIMPLEPICTURE_PRESETS
def get_picture_info(instance, preset_name):
"""
Compute picture information for a given preset defined in settings.
A preset is of the form:
"default": {
"src": {"size": (1000, 1000), "crop": "smart"}, # easythumbnail options
"srcset": [
{
"options": {"size": (800, 800), "crop": "smart"},
"descriptor": "800w",
},
{
"options": {"size": (600, 600), "crop": "smart"},
"descriptor": "600w",
},
],
"sizes": "100vw", # e.g 1000px or 100vw
}
"""
    # Bail out if the plugin instance has no image: the image is the object
    # from which all the picture information we return is derived.
if not instance.picture:
return None
thumbnailer = instance.picture.easy_thumbnails_thumbnailer
# Look for the preset in settings and fallback to "default"
preset = SIMPLEPICTURE_PRESETS.get(preset_name, SIMPLEPICTURE_PRESETS["default"])
# Complete picture information with thumbnails url calculated according to what is
# defined in the preset
picture_info = {}
location_dict = {"subject_location": instance.picture.subject_location}
# - src
options = preset["src"].copy()
options.update(location_dict)
picture_info["src"] = thumbnailer.get_thumbnail(options).url
# - srcset
srcset = []
for info in preset.get("srcset", []):
options = info["options"].copy()
options.update(location_dict)
url = thumbnailer.get_thumbnail(options).url
srcset.append(f"{url:s} {info['descriptor']:s}")
picture_info["srcset"] = ", ".join(srcset) if srcset else None
# - sizes
picture_info["sizes"] = preset.get("sizes")
return picture_info
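# Hypothetical smoke test with stub objects (easy-thumbnails itself is not
# exercised; the stub URL format is an assumption): shows the shape of the
# dict that get_picture_info returns for the "default" preset.
if __name__ == "__main__":
    class _StubThumb:
        def __init__(self, options):
            self.url = "/thumb-%s.jpg" % (options.get("size"),)
    class _StubThumbnailer:
        def get_thumbnail(self, options):
            return _StubThumb(options)
    class _StubPicture:
        subject_location = None
        easy_thumbnails_thumbnailer = _StubThumbnailer()
    class _StubInstance:
        picture = _StubPicture()
    print(get_picture_info(_StubInstance(), "default"))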
|
49886d898b2e7b62fc86264df9a26c8aaa210491
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/pytorch/source/caffe2/python/operator_test/checkpoint_test.py
|
dc42d471f722b41ac3421b69abe5d7fdae8df104
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
checkpoint_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import os
import shutil
import tempfile
import unittest
class CheckpointTest(unittest.TestCase):
"""A simple test case to make sure that the checkpoint behavior is correct.
"""
def testCheckpoint(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_checkpoint")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
# new style if needed.
net.Iter([], "iter")
net.ConstantFill([], "value", shape=[1, 2, 3])
net.Checkpoint(["iter", "value"], [],
db=os.path.join(temp_root, "test_checkpoint_at_%05d"),
db_type="leveldb", every=10, absolute_path=True)
self.assertTrue(workspace.CreateNet(net))
for i in range(100):
self.assertTrue(workspace.RunNet("test_checkpoint"))
for i in range(1, 10):
# Print statements are only for debugging purposes.
# print("Asserting %d" % i)
# print(os.path.join(temp_root, "test_checkpoint_at_%05d" % (i * 10)))
self.assertTrue(os.path.exists(
os.path.join(temp_root, "test_checkpoint_at_%05d" % (i * 10))))
# Finally, clean up.
shutil.rmtree(temp_root)
if __name__ == "__main__":
    unittest.main()
|
30f99c6e8b0ed683da472c8b5d6c0186e3067171
|
ce1c91c33d9b612e97361527e5a974996208c90d
|
/glue/plugins/dendro_viewer/qt/layer_style_editor.py
|
c72401393fea855c3d7c60828fc2430612b5b098
|
[
"BSD-3-Clause"
] |
permissive
|
glue-viz/glue
|
5f52faaf91e1ca4822d3983b6a4b9b60e8807f38
|
1a5c7676c025a1a025068b806f6f90ed53bba543
|
refs/heads/main
| 2023-09-04T09:24:00.519833
| 2023-08-17T09:40:04
| 2023-08-17T09:40:04
| 1,768,238
| 609
| 149
|
NOASSERTION
| 2023-09-13T20:56:14
| 2011-05-18T20:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 320
|
py
|
layer_style_editor.py
|
import warnings
from glue.utils.error import GlueDeprecationWarning
warnings.warn('Importing from glue.plugins.dendro_viewer.qt.layer_style_editor is deprecated, use glue_qt.plugins.dendro_viewer.layer_style_editor instead', GlueDeprecationWarning)
from glue_qt.plugins.dendro_viewer.layer_style_editor import * # noqa
|
a3f091f94e9b1174f3bce17bf2b149ac9d110005
|
c1b32c2e36f64c6d7c352242e9e1f6b16ea02da5
|
/tf_explain/utils/image.py
|
f7638e5bc383c6acff5a2eb0d72e85b2af44c207
|
[
"MIT"
] |
permissive
|
sicara/tf-explain
|
00246fde5305ad96611fdba23563c97fbc4cdc38
|
9d7d1e900ec3e3e4b5338fbc43dfb93539acecc2
|
refs/heads/master
| 2023-08-21T22:58:55.150396
| 2022-06-30T08:14:18
| 2022-06-30T08:14:18
| 196,956,879
| 1,033
| 122
|
MIT
| 2022-06-30T08:14:19
| 2019-07-15T08:26:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
image.py
|
""" Module for image operations """
import numpy as np
import tensorflow as tf
def apply_grey_patch(image, top_left_x, top_left_y, patch_size):
"""
Replace a part of the image with a grey patch.
Args:
image (numpy.ndarray): Input image
top_left_x (int): Top Left X position of the applied box
top_left_y (int): Top Left Y position of the applied box
patch_size (int): Size of patch to apply
Returns:
numpy.ndarray: Patched image
"""
patched_image = np.array(image, copy=True)
patched_image[
top_left_y : top_left_y + patch_size, top_left_x : top_left_x + patch_size, :
] = 127.5
return patched_image
@tf.function
def transform_to_normalized_grayscale(tensor):
"""
Transform tensor over RGB axis to grayscale.
Args:
tensor (tf.Tensor): 4D-Tensor with shape (batch_size, H, W, 3)
Returns:
tf.Tensor: 4D-Tensor of grayscale tensor, with shape (batch_size, H, W, 1)
"""
grayscale_tensor = tf.reduce_sum(tensor, axis=-1)
normalized_tensor = tf.cast(
255 * tf.image.per_image_standardization(grayscale_tensor), tf.uint8
)
return normalized_tensor
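# Hypothetical smoke test (assumes NumPy and TensorFlow are installed):
# occlude a 32x32 region of a random image, then normalize a random batch.
if __name__ == "__main__":
    image = np.random.uniform(0, 255, (64, 64, 3))
    patched = apply_grey_patch(image, top_left_x=8, top_left_y=8, patch_size=32)
    assert (patched[8:40, 8:40, :] == 127.5).all()   # the grey box is in place
    batch = tf.random.uniform((2, 64, 64, 3))
    gray = transform_to_normalized_grayscale(batch)
    print(patched.shape, gray.shape, gray.dtype)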
|
0579c0c75cb17a67241359ceacea8afc70efb2dc
|
40dd8330e5f78c4348bbddc2c5acfd59d793dd51
|
/projects/gid_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_gid-256x256.py
|
70cb6005f81a4fa859a2abf8cc1b4afe59893ab3
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmsegmentation
|
0d12092312e2c465ede1fd7dd9847b6f2b37049c
|
30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8
|
refs/heads/main
| 2023-09-04T10:54:52.299711
| 2023-07-24T07:28:21
| 2023-07-24T07:28:21
| 272,133,018
| 6,534
| 2,375
|
Apache-2.0
| 2023-09-14T01:22:32
| 2020-06-14T04:32:33
|
Python
|
UTF-8
|
Python
| false
| false
| 557
|
py
|
deeplabv3plus_r101-d8_4xb2-240k_gid-256x256.py
|
_base_ = [
'../../../configs/_base_/models/deeplabv3plus_r50-d8.py',
'./_base_/datasets/gid.py', '../../../configs/_base_/default_runtime.py',
'../../../configs/_base_/schedules/schedule_240k.py'
]
custom_imports = dict(imports=['projects.gid_dataset.mmseg.datasets.gid'])
crop_size = (256, 256)
data_preprocessor = dict(size=crop_size)
model = dict(
data_preprocessor=data_preprocessor,
pretrained='open-mmlab://resnet101_v1c',
backbone=dict(depth=101),
decode_head=dict(num_classes=6),
auxiliary_head=dict(num_classes=6))
|
8a4bfeb7521340eaf3d6d46978514a3c9cbf18bb
|
21e67cc6406a3c8063fae691a5f8b5c46bf5d53f
|
/hcloud/deprecation/domain.py
|
cd1b53656f4a672c8a24dd639acddee277bbadd0
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hetznercloud/hcloud-python
|
6b524f4519fb933d65fbf039e7f78b251b493d10
|
982e35b47e36e24b140fcad84817a4b28dc09c2d
|
refs/heads/main
| 2023-09-03T14:08:03.920174
| 2023-08-25T14:14:21
| 2023-08-25T14:14:21
| 162,585,627
| 221
| 49
|
MIT
| 2023-09-12T07:02:46
| 2018-12-20T13:48:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
domain.py
|
from __future__ import annotations
from dateutil.parser import isoparse
from ..core import BaseDomain
class DeprecationInfo(BaseDomain):
"""Describes if, when & how the resources was deprecated. If this field is set to ``None`` the resource is not
deprecated. If it has a value, it is considered deprecated.
:param announced: datetime
Date of when the deprecation was announced.
:param unavailable_after: datetime
After the time in this field, the resource will not be available from the general listing endpoint of the
resource type, and it can not be used in new resources. For example, if this is an image, you can not create
new servers with this image after the mentioned date.
"""
__slots__ = (
"announced",
"unavailable_after",
)
def __init__(
self,
announced: str | None = None,
unavailable_after: str | None = None,
):
self.announced = isoparse(announced) if announced else None
self.unavailable_after = (
isoparse(unavailable_after) if unavailable_after else None
)
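# Illustrative usage (values are made up): ISO 8601 strings are parsed into
# timezone-aware datetimes, and omitted fields stay None.
if __name__ == "__main__":
    info = DeprecationInfo(
        announced="2023-06-01T00:00:00+00:00",
        unavailable_after="2023-09-01T00:00:00+00:00",
    )
    print(info.announced.isoformat(), info.unavailable_after.isoformat())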
|
99e4b6436859d0250607172794a3f67adb17e0da
|
c9502eb1420a32a7bf36a32391d6b66b7018f9a7
|
/docker/jupyter_config.py
|
ef5a95f6ee3f0520d37b28d717ebab30f1a4f5a4
|
[
"MIT"
] |
permissive
|
kundajelab/dragonn
|
ae81f00885494e61b81a6ce359130e2283038364
|
d2442a27a3991a18717ef199e3197d6692f14c33
|
refs/heads/master
| 2023-08-13T21:52:20.555859
| 2022-07-08T03:29:04
| 2022-07-08T03:29:04
| 60,218,942
| 262
| 95
|
MIT
| 2023-03-24T22:57:13
| 2016-06-02T00:03:18
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 108
|
py
|
jupyter_config.py
|
c.NotebookApp.allow_origin = '*'
c.NotebookApp.ip = '*'
c.NotebookApp.port = 8888
c.NotebookApp.token = ''
|
37966bd679c8c763fb31432962446eeb25e6e619
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/testing/_internal/opinfo/definitions/sparse.py
|
ac042d3343f929d60aee9db5575c1964c09a90f0
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 34,059
|
py
|
sparse.py
|
import os
import torch
from torch.testing import make_tensor # noqa: F401
from torch.testing._internal.opinfo.core import ( # noqa: F401
BinaryUfuncInfo,
ErrorInput,
generate_elementwise_binary_tensors,
ReductionOpInfo,
sample_inputs_reduction,
SampleInput,
)
def _check_validate(op_info, sample):
def _check_fail(sample):
try:
op_info(
sample.sample_input.input,
*sample.sample_input.args,
**sample.sample_input.kwargs,
)
except sample.error_type:
pass
except Exception as msg:
raise AssertionError(
f"{op_info.name} on {sample.sample_input=} expected exception "
f"{sample.error_type}: {sample.error_regex}, got {type(msg).__name__}: {msg}"
)
else:
raise AssertionError(
f"{op_info.name} on {sample.sample_input=} expected exception "
f"{sample.error_type}: {sample.error_regex}, got none."
)
def _check_success(sample):
try:
op_info(sample.input, *sample.args, **sample.kwargs)
except Exception as msg:
raise AssertionError(
f"{op_info.name} on {sample=} expected to succeed "
f", got {type(msg).__name__}: {msg}"
)
if isinstance(sample, ErrorInput):
_check_fail(sample)
else:
_check_success(sample)
def _sample_inputs_sparse(
sample_inputs,
maybe_failing_sample_inputs,
validate_sample_input,
op_info,
*args,
**kwargs,
):
check_validate = (
os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1"
)
for sample in sample_inputs(op_info, *args, **kwargs):
sample = validate_sample_input(op_info, sample, check_validate=check_validate)
if isinstance(sample, SampleInput):
yield sample
# Error inputs are handled in error_inputs_sparse
for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs):
sample = validate_sample_input(op_info, sample, check_validate=check_validate)
if isinstance(sample, SampleInput):
yield sample
def _error_inputs_sparse(
maybe_failing_sample_inputs, validate_sample_input, op_info, *args, **kwargs
):
check_validate = (
os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1"
)
for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs):
sample = validate_sample_input(op_info, sample, check_validate=check_validate)
if isinstance(sample, ErrorInput):
yield sample
# Sample inputs are handled in sample_inputs_sparse
def _apply_requires_grad_to_samples(sample_inputs):
"""Decorator to _maybe_failing_sample_inputs_... generator functions
that clones and sets requires_grad argument to tensors in sample
input arguments. This is needed when the generated samples share
tensor instances.
"""
def wrapper(op_info, device, dtype, requires_grad, layout, **kwargs):
def apply_requires_grad(x):
if (
not isinstance(x, torch.Tensor)
or x.requires_grad
or not requires_grad
or not (x.is_floating_point() or x.is_complex())
):
return x
return x.detach().clone().requires_grad_(requires_grad)
if requires_grad:
for sample_input in sample_inputs(
op_info, device, dtype, requires_grad, layout, **kwargs
):
yield sample_input.transform(apply_requires_grad)
else:
yield from sample_inputs(
op_info, device, dtype, requires_grad, layout, **kwargs
)
return wrapper
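# Illustrative sketch (hypothetical, for exposition only): a generator wrapped
# with the decorator above may reuse one tensor across a sample's arguments;
# every yielded sample is transformed so shared tensors become fresh
# requires_grad-enabled clones. (Assumes a layout accepted by to_sparse
# without a blocksize, e.g. torch.sparse_coo.)
@_apply_requires_grad_to_samples
def _demo_shared_tensor_samples(op_info, device, dtype, requires_grad, layout, **kwargs):
    t = torch.ones(2, 2, dtype=dtype, device=device).to_sparse(layout=layout)
    yield SampleInput(t, args=(t,))  # reusing the same instance is safe here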
def sample_inputs_sparse_reduction(
op_info, device, dtype, requires_grad, layout, blocksize=None, **kwargs
):
"""Sample inputs for reduction operations on sparse tensors."""
layout_name = str(layout).split(".", 1)[-1].rsplit("_coo", 1)[0]
op_supports_layout = getattr(op_info, "supports_" + layout_name)
if not op_supports_layout:
return
for sample_input in sample_inputs_reduction(
op_info, device, dtype, requires_grad, **kwargs
):
if sample_input.input.ndim == 0:
# scalar sparse tensors are not supported
continue
if layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
if sample_input.input.ndim < 2:
# conversion to sparse compressed tensors requires at
# least 2 dimensional tensors
continue
if sample_input.input.ndim > 2 and (sample_input.input == 0).any():
# Skip batched sparse compressed samples that contain
# explicit zeros because to_sparse(layout=..) will
# fail, see gh-98495.
# TODO: remove this if-block after gh-98495 is fixed.
continue
if layout in {torch.sparse_bsr, torch.sparse_bsc} and blocksize is None:
blocksize = (1, 1)
yield SampleInput(
sample_input.input.detach()
.to_sparse(layout=layout, blocksize=blocksize)
.requires_grad_(requires_grad),
args=sample_input.args,
kwargs=sample_input.kwargs,
)
if layout is torch.sparse_coo and (dtype.is_floating_point or dtype.is_complex):
# uncoalesced samples
inp = sample_input.input.detach().to_sparse(layout=layout)
inp = torch.sparse_coo_tensor(
inp.indices().repeat(1, 2),
inp.values().repeat(2),
inp.shape,
dtype=inp.dtype,
device=inp.device,
)
assert not inp.is_coalesced()
yield SampleInput(
inp.requires_grad_(requires_grad),
args=sample_input.args,
kwargs=sample_input.kwargs,
)
if sample_input.input.ndim > 2:
# hybrid samples
yield SampleInput(
sample_input.input.detach()
.to_sparse(
layout=layout,
blocksize=blocksize,
dense_dim=sample_input.input.ndim - 2,
)
.requires_grad_(requires_grad),
args=sample_input.args,
kwargs=sample_input.kwargs,
)
def _validate_sample_input_sparse_reduction(op_info, sample, check_validate=False):
"""Return the specified sample when it is valid and supported by the
    operation. Otherwise, return the sample as an ErrorInput instance.
    When check_validate is True, the result is validated by actually
calling the op on the sample.
"""
UNSPECIFIED = object()
if op_info.name == "sum":
sample = _validate_sample_input_sparse_reduction_sum(sample)
if op_info.name in {"masked.sum"}:
mask = sample.kwargs.get("mask", UNSPECIFIED)
if (
mask not in {None, UNSPECIFIED}
and mask.ndim > 2
and mask.layout is torch.strided
and (mask == 0).any()
):
# TODO: remove this if-block after gh-98495 is fixed.
sample = ErrorInput(
sample,
error_regex="Expect the same number of specified elements per batch.",
)
elif not sample.kwargs.get("keepdim"):
sample = ErrorInput(
sample,
error_type=(AssertionError, RuntimeError),
error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported",
)
elif mask is UNSPECIFIED:
sample = ErrorInput(
sample,
error_type=ValueError,
error_regex="masked (.*) expects explicit mask for sparse_csr tensor input",
)
elif sample.input.ndim > 2:
sample = ErrorInput(
sample,
error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.",
)
if op_info.name in {"masked.amax", "masked.amin", "masked.mean", "masked.prod"}:
t_inp = sample.input
batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim()
mask = sample.kwargs.get("mask")
if (
mask is not None
and mask.ndim > 2
and mask.layout is torch.strided
and (mask == 0).any()
):
# TODO: remove this if-block after gh-98495 is fixed.
sample = ErrorInput(
sample,
error_regex="Expect the same number of specified elements per batch.",
)
elif mask is None:
sample = ErrorInput(
sample,
error_type=ValueError,
error_regex="masked (.*) expects explicit mask for sparse_csr tensor input",
)
elif (
mask.layout is sample.input.layout
and mask.ndim > 2
and op_info.name == "masked.mean"
):
sample = ErrorInput(
sample,
error_type=TypeError,
error_regex=(
"where[(][)] received an invalid combination of arguments"
" - got [(]Tensor, Tensor, NoneType[)]"
),
)
elif not sample.kwargs.get("keepdim"):
sample = ErrorInput(
sample,
error_type=(AssertionError, RuntimeError),
error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported",
)
elif (
sample.input.ndim > 2
and (sample.kwargs.get("dim") not in {0, 1})
and mask.ndim > 2
and mask.layout is not torch.strided
):
if sample.kwargs.get("dim") == (0, -1):
sample = ErrorInput(
sample,
error_regex="tensor dimensionality must be sum of batch, base, and dense dimensionalities",
)
elif op_info.name == "masked.prod":
sample = ErrorInput(
sample,
error_regex="input_dim == 2 INTERNAL ASSERT FAILED at",
)
else:
sample = ErrorInput(
sample,
error_type=AssertionError,
error_regex="Sparse CSR tensors are 2D and only support reduction along dim 0 or 1.",
)
elif sample.input.ndim > 2:
sample = ErrorInput(
sample,
error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.",
)
elif (
mask.layout is t_inp.layout
and mask._nnz() != t_inp._nnz()
and t_inp.dense_dim() > 0
):
sample = ErrorInput(
sample,
error_regex="Index tensor must have the same number of dimensions as src tensor",
)
if check_validate:
_check_validate(op_info, sample)
return sample
def _validate_sample_input_sparse_reduction_sum(sample, check_validate=False):
# NOTE: When fixing a failing sample case, remove the
# corresponding if-block
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
dim = t_kwargs.get("dim")
keepdim = t_kwargs.get("keepdim")
layout = t_inp.layout
if isinstance(dim, (int, list, tuple)):
if layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
if layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
return ErrorInput(
sample,
error_regex=(
"Currently the only compressed sparse format supported for sum.dim_IntList is CSR, but got layout"
),
)
if layout in {torch.sparse_csr, torch.sparse_csc} and not keepdim:
return ErrorInput(
sample,
error_regex=(
"reduction operations on CSR tensors with keepdim=False is unsupported"
),
)
if t_inp.dim() != 2:
return ErrorInput(
sample,
error_regex=("input_dim == 2 INTERNAL ASSERT"),
)
if layout == torch.sparse_csr:
if t_inp.dtype == torch.bool:
return ErrorInput(
sample,
error_regex=("_sparse_csr_sum_cpu not implemented for 'Bool'"),
)
if t_inp.dtype == torch.complex32:
return ErrorInput(
sample,
error_regex=(
"_sparse_csr_sum_cuda not implemented for 'ComplexHalf'"
),
)
return sample
def _maybe_failing_sample_inputs_sparse_reduction_sum(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Generator of samples that are known to fail or that were failing in past."""
# NOTE: When fixing a failing case, remove the Exception comment
# but keep the `yield sample` statement.
if layout in [
torch.sparse_csr,
torch.sparse_csc,
]:
# NotImplementedError: Could not run 'aten::sum.IntList_out' with arguments from the 'SparseCsrCPU' backend.
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout)
.requires_grad_(requires_grad),
kwargs=dict(dim=0, keepdim=True),
)
yield SampleInput(
torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype)
.to_sparse(layout=layout, dense_dim=1)
.requires_grad_(requires_grad),
kwargs=dict(dim=0),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout)
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,)),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout)
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,), keepdim=True),
)
yield SampleInput(
torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype)
.to_sparse(layout=layout, dense_dim=1)
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,)),
)
# RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2]
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout)
.requires_grad_(requires_grad),
kwargs=dict(dim=0),
)
if layout in [
torch.sparse_bsr,
torch.sparse_bsc,
]:
# RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(2, 2))
.requires_grad_(requires_grad),
kwargs=dict(dim=0, keepdim=True),
)
yield SampleInput(
torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype)
.to_sparse(layout=layout, dense_dim=1, blocksize=(1, 1))
.requires_grad_(requires_grad),
kwargs=dict(dim=0),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(1, 1))
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,)),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(1, 1))
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,), keepdim=True),
)
yield SampleInput(
torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(1, 1), dense_dim=1)
.requires_grad_(requires_grad),
kwargs=dict(dim=(0,)),
)
# RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2]
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype)
.to_sparse(layout=layout, blocksize=(1, 1))
.requires_grad_(requires_grad),
kwargs=dict(dim=0),
)
def sample_inputs_sparse_reduction_sum(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Sample inputs for sum on sparse tensors."""
yield from _sample_inputs_sparse(
sample_inputs_sparse_reduction,
_maybe_failing_sample_inputs_sparse_reduction_sum,
_validate_sample_input_sparse_reduction,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def error_inputs_sparse_reduction_sum(op_info, device, layout, **kwargs):
"""Error inputs for sum on sparse tensors."""
dtype = torch.float64
requires_grad = False
yield from _error_inputs_sparse(
_maybe_failing_sample_inputs_sparse_reduction_sum,
_validate_sample_input_sparse_reduction,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def sample_inputs_sparse_elementwise_binary_operation(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Sample inputs for elementwise binary operations on sparse tensors.
The samples include regular, zero-sized, batched, and hybrid
sparse tensors as well as rhs scalars. All tensors are full tensors.
"""
def _to_sparse(tensor, **kwargs):
return tensor.detach().to_sparse(**kwargs).requires_grad_(requires_grad)
for sample_input in generate_elementwise_binary_tensors(
op_info,
device=device,
dtype=dtype,
requires_grad=requires_grad,
exclude_zero=True,
**kwargs,
):
lhs, rhs = sample_input.input, sample_input.args[0]
min_dense_dim = 0
max_dense_dim = lhs.ndim - 1
if layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
if lhs.ndim < 2:
# sparse compressed tensors sparse_dim must be 2
continue
max_dense_dim = lhs.ndim - 2
for dense_dim in range(min_dense_dim, max_dense_dim + 1):
if layout in {torch.sparse_bsr, torch.sparse_bsc}:
blocksizes = [(1, 1)]
if lhs.numel() > 0:
blocksizes.append(
(
lhs.shape[lhs.ndim - 2 - dense_dim],
lhs.shape[lhs.ndim - 1 - dense_dim],
)
)
else:
blocksizes = [None]
for blocksize in blocksizes:
to_sparse_kwargs = dict(
layout=layout, dense_dim=dense_dim, blocksize=blocksize
)
lhs_sparse = _to_sparse(lhs, **to_sparse_kwargs)
rhs_sparse = _to_sparse(rhs, **to_sparse_kwargs)
# op(sparse, sparse)
yield SampleInput(
lhs_sparse,
args=(rhs_sparse, *sample_input.args[1:]),
kwargs=sample_input.kwargs,
)
# op(sparse, scalar)
yield SampleInput(
lhs_sparse,
args=(
make_tensor(
(), dtype=dtype, device=device, requires_grad=requires_grad
),
*sample_input.args[1:],
),
kwargs=sample_input.kwargs,
)
def _validate_sample_input_elementwise_binary_sparse_mul(sample):
# NOTE: When fixing a failing sample case, remove the
# corresponding if-block
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim()
layout = t_inp.layout
dtype = t_inp.dtype
if layout is torch.sparse_csr and batch_dim > 0 and t_args[0].ndim > 0:
return ErrorInput(
sample,
error_regex="crow_indices is supposed to be a vector, but got 2 dimensional tensor",
)
elif layout is torch.sparse_csc and t_args[0].ndim > 0:
return ErrorInput(
sample, error_regex="Expected result Tensor to be of format CSR"
)
elif layout is torch.sparse_bsr and t_args[0].ndim > 0:
return ErrorInput(
sample,
error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsr",
)
elif layout is torch.sparse_bsc and t_args[0].ndim > 0:
return ErrorInput(
sample,
error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsc",
)
elif (
layout is torch.sparse_coo
and dtype is torch.bool
and t_args[0].ndim > 0
and t_inp.is_cpu
and t_inp.numel() > 0
and t_inp.dense_dim() > 0
):
return ErrorInput(
sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Bool'"
)
elif (
layout in {torch.sparse_coo, torch.sparse_csr}
and dtype is torch.bool
and t_inp._nnz() > 0
and t_args[0].ndim > 0
and t_inp.is_cpu
and t_inp.numel() > 0
):
return ErrorInput(
sample, error_regex="\"mul_out_sparse\" not implemented for 'Bool'"
)
elif (
layout is torch.sparse_csr
and t_args[0].layout is torch.strided
and 0 < t_args[0].ndim
and t_args[0].ndim < t_inp.ndim
):
return ErrorInput(
sample, error_regex="sparse_mask_sparse_csr expects self to be 2D"
)
elif layout is torch.sparse_csr and (
(t_args[0].layout is torch.strided and 0 < t_args[0].ndim)
or (t_args[0].layout is layout and t_inp.shape != t_args[0].shape)
):
return ErrorInput(
sample,
error_regex=(
"expects sparse inputs with equal dimensionality, number of sparse dimensions,"
" and shape of sparse dimensions"
),
)
elif (
layout is torch.sparse_csr
and t_inp.dense_dim() > 0
and t_inp._nnz() > 0
and t_inp.is_cpu
and dtype is torch.float16
and t_args[0].ndim > 0
):
return ErrorInput(
sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Half'"
)
return sample
@_apply_requires_grad_to_samples
def _maybe_failing_sample_inputs_sparse_elementwise_binary_mul(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Generator of samples that are known to fail or that were failing in past."""
# NOTE: When fixing a failing case, remove the Exception comment
# but keep the `yield sample` statement.
blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None
regular = torch.tensor([[1, 2], [3, 4]], device=device, dtype=dtype).to_sparse(
layout=layout, dense_dim=0, blocksize=blocksize
)
batch = torch.tensor(
[[[1, 2], [3, 4]], [[4, 5], [6, 7]]], device=device, dtype=dtype
).to_sparse(layout=layout, dense_dim=0, blocksize=blocksize)
hybrid = torch.tensor(
[[[1], [2]], [[3], [4]]], device=device, dtype=dtype
).to_sparse(layout=layout, dense_dim=1, blocksize=blocksize)
if layout is torch.sparse_csr:
# RuntimeError: crow_indices is supposed to be a vector, but got 2 dimensional tensor
yield SampleInput(batch, args=(batch,))
# RuntimeError: Only tensors with two sparse dimensions can be
# converted to the SparseCsr layout, got self with 3 sparse
# dimensions.
yield SampleInput(
torch.zeros_like(hybrid).requires_grad_(requires_grad),
args=(torch.zeros_like(hybrid).requires_grad_(requires_grad),),
)
if dtype is torch.complex32:
# RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf'
yield SampleInput(regular, args=(regular,))
if dtype is torch.bool and regular.is_cpu:
# RuntimeError: "mul_out_sparse" not implemented for 'Bool'
yield SampleInput(regular, args=(regular,))
if layout is torch.sparse_csc:
# RuntimeError: Expected result Tensor to be of format CSR
yield SampleInput(regular, args=(regular,))
if layout is torch.sparse_bsr:
# RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr
yield SampleInput(regular, args=(regular,))
if layout is torch.sparse_bsc:
# RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsc
yield SampleInput(regular, args=(regular,))
if layout is torch.sparse_coo:
if dtype is torch.complex32:
# RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf'
yield SampleInput(regular, args=(regular,))
if dtype is torch.bool and regular.is_cpu:
# RuntimeError: "mul_out_sparse" not implemented for 'Bool'
yield SampleInput(regular, args=(regular,))
if dtype in {torch.bool, torch.float16} and regular.is_cpu:
# RuntimeError: "addcmul_cpu_out" not implemented for '(Bool|Half)'
yield SampleInput(hybrid, args=(hybrid,))
def _validate_sample_input_sparse_elementwise_binary_operation(
op_info, sample, check_validate=False
):
if op_info.name == "mul":
sample = _validate_sample_input_elementwise_binary_sparse_mul(sample)
if check_validate:
_check_validate(op_info, sample)
return sample
def sample_inputs_sparse_mul(op_info, device, dtype, requires_grad, layout, **kwargs):
"""Sample inputs for mul operation on sparse tensors."""
yield from _sample_inputs_sparse(
sample_inputs_sparse_elementwise_binary_operation,
_maybe_failing_sample_inputs_sparse_elementwise_binary_mul,
_validate_sample_input_sparse_elementwise_binary_operation,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def error_inputs_sparse_mul(op_info, device, layout, **kwargs):
"""Error inputs for mul operation on sparse tensors."""
dtype = torch.float64
requires_grad = False
yield from _error_inputs_sparse(
_maybe_failing_sample_inputs_sparse_elementwise_binary_mul,
_validate_sample_input_sparse_elementwise_binary_operation,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def _sample_inputs_sparse_like_fns(
op_info, device, dtype, requires_grad, layout, **kwargs
):
from torch.testing._internal.common_utils import TestCase
for tensor in TestCase().generate_simple_inputs(
layout,
device=device,
dtype=dtype,
enable_batch=True,
enable_hybrid=True,
enable_zero_sized=True,
enable_non_contiguous_indices=False,
enable_non_contiguous_values=False,
):
yield SampleInput(tensor, args=(), kwargs={})
yield SampleInput(
tensor, args=(), kwargs=dict(device=device, dtype=dtype, layout=layout)
)
if dtype is not torch.float64:
yield SampleInput(tensor, args=(), kwargs=dict(dtype=torch.float64))
if torch.cuda.is_available():
other_device = "cuda" if tensor.device.type == "cpu" else "cpu"
yield SampleInput(tensor, args=(), kwargs=dict(device=other_device))
if layout is torch.sparse_csr:
other_layout = torch.sparse_csc
elif layout is torch.sparse_csc:
other_layout = torch.sparse_csr
elif layout is torch.sparse_bsr:
other_layout = torch.sparse_bsc
elif layout is torch.sparse_bsc:
other_layout = torch.sparse_bsr
else:
other_layout = torch.strided
yield SampleInput(tensor, args=(), kwargs=dict(layout=other_layout))
if layout is not torch.sparse_coo:
yield SampleInput(tensor, args=(), kwargs=dict(layout=torch.sparse_coo))
def _validate_sample_input_sparse_like_fns(op_info, sample, check_validate=False):
if sample.input.layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
if sample.kwargs.get("device", sample.input.device) != sample.input.device:
return ErrorInput(
sample,
error_regex=(
"device of (ccol|crow)_indices \\(=(cpu|cuda.*)\\) must"
" match device of values \\(=(cuda.*|cpu)\\)"
),
)
if sample.kwargs.get("layout", sample.input.layout) != sample.input.layout:
return ErrorInput(
sample,
error_regex=(
"empty_like with different sparse layout is not supported"
" \\(self is Sparse(Csc|Csr|Bsc|Bsr) but you requested Sparse(Csr|Csc|Bsr|Bsc)\\)"
),
)
if sample.input.layout is torch.sparse_coo:
return ErrorInput(
sample,
error_regex=(
"Could not run 'aten::normal_' with arguments from the 'Sparse(CPU|CUDA)' backend."
),
)
if check_validate:
_check_validate(op_info, sample)
return sample
def _maybe_failing_sample_inputs_sparse_like_fns(
op_info, device, dtype, requires_grad, layout, **kwargs
):
if torch.cuda.is_available() and layout is not torch.sparse_coo:
other_device = "cuda" if torch.device(device).type == "cpu" else "cpu"
if layout is torch.sparse_csr:
other_layout = torch.sparse_csc
elif layout is torch.sparse_csc:
other_layout = torch.sparse_csr
elif layout is torch.sparse_bsr:
other_layout = torch.sparse_bsc
elif layout is torch.sparse_bsc:
other_layout = torch.sparse_bsr
else:
other_layout = torch.strided
blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse(
layout=layout, blocksize=blocksize
),
kwargs=dict(device=other_device),
)
yield SampleInput(
torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse(
layout=layout, blocksize=blocksize
),
kwargs=dict(layout=other_layout),
)
def sample_inputs_sparse_like_fns(
op_info, device, dtype, requires_grad, layout, **kwargs
):
"""Sample inputs for like-functions on sparse tensors."""
yield from _sample_inputs_sparse(
_sample_inputs_sparse_like_fns,
_maybe_failing_sample_inputs_sparse_like_fns,
_validate_sample_input_sparse_like_fns,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def error_inputs_sparse_like_fns(op_info, device, layout, **kwargs):
"""Error inputs for like-functions on sparse tensors."""
dtype = torch.float64
requires_grad = False
yield from _error_inputs_sparse(
_maybe_failing_sample_inputs_sparse_like_fns,
_validate_sample_input_sparse_like_fns,
op_info,
device,
dtype,
requires_grad,
layout,
**kwargs,
)
def _validate_sample_input_sparse_default(op_info, sample, check_validate=False):
if op_info.name == "to_sparse":
if (
sample.input.layout
in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}
and len(sample.args) == 1
and isinstance(sample.args[0], int)
and sample.args[0] != 2
):
sample = ErrorInput(
sample,
error_regex="sparse dim argument must be 2 for sparse_compressed_to_sparse",
)
if check_validate:
_check_validate(op_info, sample)
return sample
def validate_sample_input_sparse(op_info, sample, check_validate=False):
"""Return the specified sample when it is valid and supported by the
    operation. Otherwise, return the sample as an ErrorInput instance.
    When check_validate is True, the result is validated by actually
calling the op on the sample.
"""
if isinstance(op_info, ReductionOpInfo):
return _validate_sample_input_sparse_reduction(
op_info, sample, check_validate=check_validate
)
elif isinstance(op_info, BinaryUfuncInfo):
return _validate_sample_input_sparse_elementwise_binary_operation(
op_info, sample, check_validate=check_validate
)
else:
return _validate_sample_input_sparse_default(
op_info, sample, check_validate=check_validate
)
|
abfd0ff5ac1187358225657ce8ebccaae2f1f2cd
|
450916eee7580beb928ed8f387db4f0a8c1aa508
|
/src/amuse/community/sei/__init__.py
|
615c14382f2921c90db05fa7628700106a07cf81
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
amusecode/amuse
|
42095545893f5a86ea79c2a52ce54d3ce8eb204f
|
b57c1e2fda1457d5025307be105c2aa59b19b574
|
refs/heads/main
| 2023-08-31T04:50:48.880044
| 2023-08-30T12:00:20
| 2023-08-30T12:00:20
| 18,516,331
| 158
| 118
|
Apache-2.0
| 2023-08-30T12:00:22
| 2014-04-07T12:35:07
|
AMPL
|
UTF-8
|
Python
| false
| false
| 27
|
py
|
__init__.py
|
from .interface import Sei
|
924b25dc9b52bfb38eeb4b80bc3fee1f6d5288a3
|
fa2bb4efa5e19bc8378012c61a33c0d2f5709f74
|
/conda_smithy/azure_defaults.py
|
8852535507d712e523a64b6a78fc25f463b22677
|
[
"BSD-3-Clause"
] |
permissive
|
conda-forge/conda-smithy
|
bb7456ebaea7db3792edd5a5d1ae77f7b6bdf9e6
|
288dea84b54e5937d9202094b2bd0580aea12ebb
|
refs/heads/main
| 2023-08-27T12:12:36.091645
| 2023-08-24T07:06:28
| 2023-08-24T07:06:28
| 33,767,206
| 144
| 171
|
BSD-3-Clause
| 2023-09-12T07:56:48
| 2015-04-11T07:38:36
|
Python
|
UTF-8
|
Python
| false
| false
| 82
|
py
|
azure_defaults.py
|
AZURE_DEFAULT_ORG = "conda-forge"
AZURE_DEFAULT_PROJECT_NAME = "feedstock-builds"
|
f8953bcd635870e65aa2a0f2ab1cdda37cb4a79a
|
b1d941be5cd577ce34475339b021784aa9af6395
|
/libcloudforensics/providers/aws/forensics.py
|
505b3b1ea6e79077a08659c6af47644b44f29447
|
[
"Apache-2.0"
] |
permissive
|
google/cloud-forensics-utils
|
ef21ac682e040b5b977aa897aaf75b3b8ec1ed6d
|
38926ef5d075696b2b0f6714f3758be1e6ea1658
|
refs/heads/main
| 2023-09-04T11:05:42.136161
| 2023-08-28T03:25:22
| 2023-08-28T03:25:22
| 238,205,900
| 418
| 95
|
Apache-2.0
| 2023-09-14T05:55:03
| 2020-02-04T12:54:51
|
Python
|
UTF-8
|
Python
| false
| false
| 25,190
|
py
|
forensics.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forensics on AWS."""
from typing import TYPE_CHECKING, Tuple, List, Optional, Dict, Any
import random
from time import sleep
from libcloudforensics.providers.aws.internal.common import ALINUX2_BASE_FILTER
from libcloudforensics.providers.aws.internal.common import UBUNTU_2204_FILTER
from libcloudforensics.providers.aws.internal import account
from libcloudforensics.providers.aws.internal import iam
from libcloudforensics.providers.utils.storage_utils import SplitStoragePath
from libcloudforensics.scripts import utils
from libcloudforensics import logging_utils
from libcloudforensics import errors
if TYPE_CHECKING:
from libcloudforensics.providers.aws.internal import ebs
from libcloudforensics.providers.aws.internal import ec2
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
def CreateVolumeCopy(zone: str,
dst_zone: Optional[str] = None,
instance_id: Optional[str] = None,
volume_id: Optional[str] = None,
volume_type: Optional[str] = None,
src_profile: Optional[str] = None,
dst_profile: Optional[str] = None,
tags: Optional[Dict[str, str]] = None) -> 'ebs.AWSVolume':
"""Create a copy of an AWS EBS Volume.
By default, the volume copy will be created in the same AWS account where
the source volume sits. If you want the volume copy to be created in a
different AWS account, you can specify one in the dst_profile parameter.
The following example illustrates how you should configure your AWS
credentials file for such a use case.
# AWS credentials file
[default] # default account to use with AWS
aws_access_key_id=foo
aws_secret_access_key=bar
[investigation] # source account for a particular volume to be copied from
aws_access_key_id=foo1
aws_secret_access_key=bar1
[forensics] # destination account to create the volume copy in
aws_access_key_id=foo2
aws_secret_access_key=bar2
# Copies the boot volume from instance "instance_id" from the default AWS
# account to the default AWS account.
volume_copy = CreateVolumeCopy(zone, instance_id='instance_id')
# Copies the boot volume from instance "instance_id" from the default AWS
# account to the 'forensics' AWS account.
volume_copy = CreateVolumeCopy(
zone, instance_id='instance_id', dst_profile='forensics')
# Copies the boot volume from instance "instance_id" from the
# 'investigation' AWS account to the 'forensics' AWS account.
volume_copy = CreateVolumeCopy(
zone,
instance_id='instance_id',
src_profile='investigation',
dst_profile='forensics')
Args:
zone (str): The AWS zone in which the volume is located, e.g. 'us-east-2b'.
dst_zone (str): Optional. The AWS zone in which to create the volume
copy. By default, this is the same as 'zone'.
instance_id (str): Optional. Instance ID of the instance using the volume
to be copied. If specified, the boot volume of the instance will be
copied. If volume_id is also specified, then the volume pointed by
that volume_id will be copied.
volume_id (str): Optional. ID of the volume to copy. If not set,
then instance_id needs to be set and the boot volume will be copied.
volume_type (str): Optional. The volume type for the volume to be
created. Can be one of 'standard'|'io1'|'gp2'|'gp3'|'sc1'|'st1'. The
default behavior is to use the same volume type as the source volume.
src_profile (str): Optional. If the AWS account containing the volume
that needs to be copied is different from the default account
specified in the AWS credentials file then you can specify a
different profile name here (see example above).
dst_profile (str): Optional. If the volume copy needs to be created in a
different AWS account, you can specify a different profile name here
(see example above).
tags (Dict[str, str]): Optional. A dictionary of tags to add to the
volume copy, for example {'TicketID': 'xxx'}.
Returns:
AWSVolume: An AWS EBS Volume object.
Raises:
ResourceCreationError: If there are errors copying the volume, or errors
during KMS key creation/sharing if the target volume is encrypted.
ValueError: If both instance_id and volume_id are missing, or if AWS
account information could not be retrieved.
"""
if not instance_id and not volume_id:
raise ValueError(
'You must specify at least one of [instance_id, volume_id].')
source_account = account.AWSAccount(zone, aws_profile=src_profile)
destination_account = account.AWSAccount(zone, aws_profile=dst_profile)
kms_key_id = None
try:
if volume_id:
volume_to_copy = source_account.ebs.GetVolumeById(volume_id)
elif instance_id:
instance = source_account.ec2.GetInstanceById(instance_id)
volume_to_copy = instance.GetBootVolume()
if not volume_type:
volume_type = volume_to_copy.GetVolumeType()
logger.info('Volume copy of {0:s} started...'.format(
volume_to_copy.volume_id))
snapshot = volume_to_copy.Snapshot()
logger.info('Created snapshot: {0:s}'.format(snapshot.snapshot_id))
source_account_id = source_account.ebs.GetAccountInformation().get(
'Account')
destination_account_id = destination_account.ebs.GetAccountInformation(
).get('Account')
if not (source_account_id and destination_account_id):
raise ValueError(
'Could not retrieve AWS account ID: source {0!s}, dest: {1!s}'.format(
source_account_id, destination_account_id))
if source_account_id != destination_account_id:
logger.info('External account detected: source account ID is {0:s} and '
'destination account ID is {1:s}'.format(
source_account_id, destination_account_id))
if volume_to_copy.encrypted:
logger.info(
'Encrypted volume detected, generating one-time use CMK key')
# Generate one-time use KMS key that will be shared with the
# destination account.
kms_key_id = source_account.kms.CreateKMSKey()
source_account.kms.ShareKMSKeyWithAWSAccount(
kms_key_id, destination_account_id)
# Create a copy of the initial snapshot and encrypts it with the
# shared key
snapshot = snapshot.Copy(kms_key_id=kms_key_id, delete=True)
snapshot.ShareWithAWSAccount(destination_account_id)
logger.info('Snapshot successfully shared with external account')
if dst_zone and dst_zone != zone:
# Assign the new zone to the destination account and assign it to the
# snapshot so that it can copy it
destination_account = account.AWSAccount(
dst_zone, aws_profile=dst_profile)
snapshot.aws_account = destination_account
snapshot = snapshot.Copy(delete=True, deletion_account=source_account)
if tags and tags.get('Name'):
new_volume = destination_account.ebs.CreateVolumeFromSnapshot(
snapshot,
volume_type=volume_type,
volume_name=tags['Name'],
tags=tags)
else:
new_volume = destination_account.ebs.CreateVolumeFromSnapshot(
snapshot,
volume_type=volume_type,
volume_name_prefix='evidence',
tags=tags)
logger.info('Volume {0:s} successfully copied to {1:s}'.format(
volume_to_copy.volume_id, new_volume.volume_id))
logger.info('Cleaning up...')
snapshot.Delete()
# Delete the one-time use KMS key, if one was generated
source_account.kms.DeleteKMSKey(kms_key_id)
logger.info('Done')
except (errors.LCFError, RuntimeError) as exception:
raise errors.ResourceCreationError(
'Copying volume {0:s}: {1!s}'.format(
(volume_id or instance_id), exception), __name__) from exception
return new_volume
# pylint: disable=too-many-arguments
def StartAnalysisVm(
vm_name: str,
default_availability_zone: str,
boot_volume_size: int,
boot_volume_type: str = 'gp3',
ami: Optional[str] = None,
cpu_cores: int = 4,
attach_volumes: Optional[List[Tuple[str, str]]] = None,
dst_profile: Optional[str] = None,
ssh_key_name: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
subnet_id: Optional[str] = None,
security_group_id: Optional[str] = None,
userdata_file: Optional[str] = None
) -> Tuple['ec2.AWSInstance', bool]:
"""Start a virtual machine for analysis purposes.
  Look for an existing AWS instance with tag name vm_name. If found,
  this instance will be started and used as the analysis VM. If not found, a
  new VM with that name will be created, started and returned.
Args:
vm_name (str): The name for the virtual machine.
default_availability_zone (str): Default zone within the region to create
new resources in.
boot_volume_size (int): The size of the analysis VM boot volume (in GB).
boot_volume_type (str): Optional. The volume type for the boot volume
of the VM. Can be one of 'standard'|'io1'|'gp2'|'gp3'|'sc1'|'st1'. The
default is 'gp3'.
ami (str): Optional. The Amazon Machine Image ID to use to create the VM.
Default is a version of Ubuntu 22.04.
cpu_cores (int): Optional. The number of CPU cores to create the machine
with. Default is 4.
attach_volumes (List[Tuple[str, str]]): Optional. List of tuples
containing the volume IDs (str) to attach and their respective device
name (str, e.g. /dev/sdf). Note that it is mandatory to provide a
unique device name per volume to attach.
dst_profile (str): Optional. The AWS account in which to create the
analysis VM. This is the profile name that is defined in your AWS
credentials file.
ssh_key_name (str): Optional. A SSH key pair name linked to the AWS
account to associate with the VM. If none provided, the VM can only
be accessed through in-browser SSH from the AWS management console
with the EC2 client connection package (ec2-instance-connect). Note
that if this package fails to install on the target VM, then the VM
will not be accessible. It is therefore recommended to fill in this
parameter.
tags (Dict[str, str]): Optional. A dictionary of tags to add to the
instance, for example {'TicketID': 'xxx'}. An entry for the instance
name is added by default.
subnet_id (str): Optional. The subnet to launch the instance in.
security_group_id (str): Optional. Security group ID to attach.
userdata_file (str): Optional. Filename to be read in as the userdata
launch script.
Returns:
Tuple[AWSInstance, bool]: a tuple with a virtual machine object
and a boolean indicating if the virtual machine was created or not.
Raises:
RuntimeError: When multiple AMI images are returned.
"""
aws_account = account.AWSAccount(
default_availability_zone, aws_profile=dst_profile)
# If no AMI ID is given we use the default Ubuntu 22.04
# in the region requested.
if not ami:
logger.info('No AMI provided, fetching one for Ubuntu 22.04')
qfilter = [{'Name': 'name', 'Values': [UBUNTU_2204_FILTER]}]
ami_list = aws_account.ec2.ListImages(qfilter)
# We should only get 1 AMI image back, if we get multiple we
# have no way of knowing which one to use.
if len(ami_list) > 1:
image_names = [image['Name'] for image in ami_list]
      raise RuntimeError('ListImages returned more than one AMI image: [{0:s}]'
                         .format(', '.join(image_names)))
ami = ami_list[0]['ImageId']
assert ami # Mypy: assert that ami is not None
if not userdata_file:
userdata_file = utils.FORENSICS_STARTUP_SCRIPT_AWS
userdata = utils.ReadStartupScript(userdata_file)
logger.info('Starting analysis VM {0:s}'.format(vm_name))
analysis_vm, created = aws_account.ec2.GetOrCreateVm(
vm_name,
boot_volume_size,
ami,
cpu_cores,
boot_volume_type=boot_volume_type,
ssh_key_name=ssh_key_name,
tags=tags,
subnet_id=subnet_id,
security_group_id=security_group_id,
userdata=userdata)
logger.info('VM started.')
for volume_id, device_name in (attach_volumes or []):
logger.info('Attaching volume {0:s} to device {1:s}'.format(
volume_id, device_name))
analysis_vm.AttachVolume(
aws_account.ebs.GetVolumeById(volume_id), device_name)
logger.info('VM ready.')
return analysis_vm, created
# pylint: enable=too-many-arguments
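# Example usage (illustrative only; IDs and names below are hypothetical):
#
#   vm, created = StartAnalysisVm(
#       'incident-1234-analysis',
#       'us-east-2b',
#       boot_volume_size=50,
#       attach_volumes=[('vol-0123456789abcdef0', '/dev/sdf')],
#       ssh_key_name='forensics-key')
#   if created:
#       logger.info('New analysis VM provisioned')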
def CopyEBSSnapshotToS3SetUp(
aws_account: account.AWSAccount,
instance_profile_name: str) -> Dict[str, Dict[str, Any]]:
"""Set up for CopyEBSSnapshotToS3. Creates the IAM components required, or
returns the existing ones if they exist already.
Args:
aws_account (account.AWSAccount): An AWS account object.
instance_profile_name (str): name of the instance profile to create.
Returns: A Dict containing:
'profile':
'arn': The ARN of the profile.
'created': True if the profile was created; False if it existed already.
'policy':
'arn': The ARN of the policy.
'created': True if the policy was created; False if it existed already.
'role':
'name': The name of the role.
'created': True if the role was created; False if it existed already.
Raises:
ResourceCreationError: If any IAM resource could not be created.
"""
# Create the IAM pieces
ebs_copy_policy_doc = iam.ReadPolicyDoc(iam.EBS_COPY_POLICY_DOC)
ec2_assume_role_doc = iam.ReadPolicyDoc(iam.EC2_ASSUME_ROLE_POLICY_DOC)
policy_name = '{0:s}-policy'.format(instance_profile_name)
role_name = '{0:s}-role'.format(instance_profile_name)
instance_profile_arn, prof_created = aws_account.iam.CreateInstanceProfile(
instance_profile_name)
policy_arn, pol_created = aws_account.iam.CreatePolicy(
policy_name, ebs_copy_policy_doc)
_, role_created = aws_account.iam.CreateRole(
role_name, ec2_assume_role_doc)
aws_account.iam.AttachPolicyToRole(
policy_arn, role_name)
aws_account.iam.AttachInstanceProfileToRole(
instance_profile_name, role_name)
return {
'profile': {'arn': instance_profile_arn, 'created': prof_created},
'policy': {'arn': policy_arn, 'created': pol_created},
'role': {'name': role_name, 'created': role_created}
}
def CopyEBSSnapshotToS3Process(
aws_account: account.AWSAccount,
s3_destination: str,
snapshot_id: str,
instance_profile_arn: str,
subnet_id: Optional[str] = None,
security_group_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Copy an EBS snapshot into S3.
Unfortunately, this action is not natively supported in AWS, so it requires
creating a volume and attaching it to an instance. This instance, using a
  userdata script, then performs a `dd` operation to send the disk image to S3.
Args:
aws_account (account.AWSAccount): An AWS account to use for the operation.
s3_destination (str): S3 directory in the form of s3://bucket/path/folder
snapshot_id (str): EBS snapshot ID.
instance_profile_arn (str): The name of an existing instance profile to
attach to the instance, or to create if it does not yet exist.
subnet_id (str): Optional. The subnet to launch the instance in.
security_group_id (str): Optional. Security group ID to attach.
  Returns:
    Dict[str, Any]: the S3 paths of the produced image ('image' key) and hash
      logs ('hashes' key), per the return statement at the end.
  Raises:
ResourceCreationError: If any dependent resource could not be created.
ResourceNotFoundError: If the snapshot ID cannot be found.
"""
# Correct destination if necessary
if not s3_destination.startswith('s3://'):
s3_destination = 's3://' + s3_destination
path_components = SplitStoragePath(s3_destination)
bucket = path_components[0]
object_path = path_components[1]
# read in the instance userdata script, sub in the snap id and S3 dest
startup_script = utils.ReadStartupScript(
utils.EBS_SNAPSHOT_COPY_SCRIPT_AWS).format(snapshot_id, s3_destination)
# Find the AMI - ALinux 2, latest version
logger.info('Finding AMI')
qfilter = [
{'Name': 'name', 'Values': [ALINUX2_BASE_FILTER]},
      {'Name': 'owner-alias', 'Values': ['amazon']}
]
results = aws_account.ec2.ListImages(qfilter)
# Find the most recent
ami_id = None
date = ''
for result in results:
if result['CreationDate'] > date:
ami_id = result['ImageId']
date = result['CreationDate']
if not ami_id:
raise errors.ResourceCreationError(
        'Could not find suitable AMI for instance creation', __name__)
# start the VM
logger.info('Starting copy instance')
aws_account.ec2.GetOrCreateVm(
      'ebsCopy-{0:d}'.format(random.randint(10**9, 10**10 - 1)),
10,
ami_id,
4,
subnet_id=subnet_id,
security_group_id=security_group_id,
userdata=startup_script,
instance_profile=instance_profile_arn,
terminate_on_shutdown=True,
wait_for_health_checks=False
)
logger.info('Pausing 60 seconds while copy instance launches')
sleep(60)
# Calculate the times we should check for completion based on volume size
# and transfer rates (documented in cloud-forensics-utils/issues/354)
snapshot_size = aws_account.ec2.GetSnapshotInfo(snapshot_id)['VolumeSize']
percentiles = [0.25, 0.5, 0.85, 1.15, 1.5, 2.0]
transfer_speed = 60 # seconds per GB
curr_wait = 0
success = False
prefix = '{0:s}/{1:s}/'.format(object_path, snapshot_id)
files = ['image.bin', 'log.txt', 'hlog.txt', 'mlog.txt']
logger.info('Transfer expected to take {0:d} seconds'.
format(snapshot_size * transfer_speed))
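  # Worked example (illustrative): for a 100 GB snapshot the expected transfer
  # is 100 * 60 = 6000 s, so completion is checked after 1500 s (0.25), 3000 s
  # (0.5), 5100 s (0.85), 6900 s (1.15), 9000 s (1.5) and 12000 s (2.0) before
  # giving up.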
for percentile in percentiles:
curr_step = int(percentile * snapshot_size * transfer_speed)
logger.info('Waiting {0:d} seconds ({1:d} seconds total wait time) '
'to check for outputs'.format(curr_step - curr_wait, curr_step))
sleep(curr_step - curr_wait)
curr_wait = curr_step
checks = [aws_account.s3.CheckForObject(bucket, prefix + file)
for file in files]
if all(checks):
success = True
logger.info('Output files found')
break
if success:
logger.info('Image and hash copied to {0:s}/{1:s}/'.format(
s3_destination, snapshot_id))
else:
logger.info(
'Image copy timeout. The process may be ongoing, or might have failed.')
path_base = 's3://{0:s}{1:s}/{2:s}'.format(bucket,
'/' + object_path if object_path else '', snapshot_id)
return {
'image': path_base + '/image.bin',
'hashes': [
path_base + '/log.txt',
path_base + '/hlog.txt',
path_base + '/mlog.txt'
]
}
def CopyEBSSnapshotToS3TearDown(
aws_account: account.AWSAccount,
instance_profile_name: str,
iam_details: Dict[str, Dict[str, Any]]
) -> None:
"""Removes the IAM components created by CopyEBSSnapshotToS3SetUp, if any
were created anew.
Args:
aws_account (account.AWSAccount): An AWS account object.
instance_profile_name (str): The name of the instance profile.
iam_details (Dict[str, Dict[str, Any]]): The Dict returned by the SetUp
method.
"""
if iam_details['role']['created'] and iam_details['policy']['created']:
aws_account.iam.DetachInstanceProfileFromRole(
iam_details['role']['name'], instance_profile_name)
if iam_details['profile']['created']:
aws_account.iam.DetachPolicyFromRole(
iam_details['policy']['arn'], iam_details['role']['name'])
aws_account.iam.DeleteInstanceProfile(instance_profile_name)
if iam_details['role']['created']:
aws_account.iam.DeleteRole(iam_details['role']['name'])
if iam_details['policy']['created']:
aws_account.iam.DeletePolicy(iam_details['policy']['arn'])
def CopyEBSSnapshotToS3(
s3_destination: str,
snapshot_id: str,
instance_profile_name: str,
zone: str,
subnet_id: Optional[str] = None,
security_group_id: Optional[str] = None,
cleanup_iam: bool = False
) -> Dict[str, Any]:
"""Copy an EBS snapshot into S3.
Unfortunately, this action is not natively supported in AWS, so it requires
creating a volume and attaching it to an instance. This instance, using a
  userdata script, then performs a `dd` operation to send the disk image to S3.
Uses the components methods of SetUp, Process and TearDown. If you want to
copy multiple snapshots, consider using those methods directly.
Args:
s3_destination (str): S3 directory in the form of s3://bucket/path/folder
snapshot_id (str): EBS snapshot ID.
instance_profile_name (str): The name of an existing instance profile to
attach to the instance, or to create if it does not yet exist.
zone (str): AWS Availability Zone the instance will be launched in.
subnet_id (str): Optional. The subnet to launch the instance in.
security_group_id (str): Optional. Security group ID to attach.
    cleanup_iam (bool): If we created IAM components, remove them afterwards.
  Returns:
    Dict[str, Any]: the S3 paths of the produced image and hash logs, as
      returned by CopyEBSSnapshotToS3Process.
  Raises:
ResourceCreationError: If any dependent resource could not be created.
ResourceNotFoundError: If the snapshot ID cannot be found.
"""
aws_account = account.AWSAccount(zone)
iam_details = CopyEBSSnapshotToS3SetUp(aws_account, instance_profile_name)
# Instance role creation has a propagation delay between creating in IAM and
# being usable in EC2.
if iam_details['profile']['created']:
sleep(20)
outputs = CopyEBSSnapshotToS3Process(aws_account,
s3_destination,
snapshot_id,
iam_details['profile']['arn'],
subnet_id,
security_group_id)
if cleanup_iam:
CopyEBSSnapshotToS3TearDown(aws_account, instance_profile_name, iam_details)
return outputs
def InstanceNetworkQuarantine(
zone: str,
instance_id: str,
exempted_src_subnets: Optional[List[str]] = None
) -> None:
"""Put an AWS EC2 instance in network quarantine.
  Network quarantine is imposed by replacing the instance's security groups
  with an isolation security group that only permits traffic from the
  exempted subnets, if any.
Args:
zone (str): AWS Availability Zone the instance is in.
instance_id (str): The id (i-xxxxxx) of the virtual machine.
    exempted_src_subnets (List[str]): Optional. List of source subnets that
        will remain permitted to reach the instance.
Raises:
ResourceNotFoundError: If the instance cannot be found.
ResourceCreationError: If the security group could not be created.
AddressValueError: If a provided subnet is invalid.
"""
# Add /32 to any specified subnets that don't have a mask
# We're not checking the subnet is well formed, CreateIsolationSecurityGroup
# will take care of that
if exempted_src_subnets:
exempted_src_subnets[:] = [subnet if '/' in subnet else subnet + '/32'
for subnet in exempted_src_subnets]
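  # For example (hypothetical input), ['10.0.0.5', '192.168.0.0/24'] is
  # normalized to ['10.0.0.5/32', '192.168.0.0/24'].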
try:
aws_account = account.AWSAccount(zone)
vpc = aws_account.ec2.GetInstanceById(instance_id).vpc
logger.info('Creating isolation security group')
sg_id = \
aws_account.ec2.CreateIsolationSecurityGroup(vpc, exempted_src_subnets)
logger.info('Replacing attached security groups with isolation group')
aws_account.ec2.SetInstanceSecurityGroup(instance_id, sg_id)
except errors.ResourceNotFoundError as exception:
raise errors.ResourceNotFoundError(
        'Cannot quarantine non-existent instance {0:s}: {1!s}'.format(instance_id,
exception), __name__) from exception
def InstanceProfileMitigator(
zone: str,
instance_id: str,
revoke_existing: bool = False
) -> None:
"""Remove an instance profile attachment from an instance.
Also, optionally revoke existing issued tokens for the profile.
Args:
zone (str): AWS Availability Zone the instance is in.
instance_id (str): The id (i-xxxxxx) of the virtual machine.
revoke_existing (bool): True to revoke existing tokens for the profile's
role. False otherwise.
Raises:
ResourceNotFoundError: If the instance cannot be found, or does not have a
profile attachment.
"""
logger.info('Finding profile attachment')
aws_account = account.AWSAccount(zone)
assoc_id, profile = aws_account.ec2.GetInstanceProfileAttachment(instance_id)
if not profile or not assoc_id:
raise errors.ResourceNotFoundError(
'Instance not found or does not have a profile attachment: {0:s}'.
format(instance_id), __name__)
logger.info('Removing profile attachment')
aws_account.ec2.DisassociateInstanceProfile(assoc_id)
if revoke_existing:
logger.info('Invalidating old tokens')
role_name = profile.split('/')[1]
aws_account.iam.RevokeOldSessionsForRole(role_name)
|
2598971a0b31d3985cb7380a3673cfd76b908edc
|
2fd7505c101e03ea53a84fedde3ab1677c6947c6
|
/src/OpenSSL/debug.py
|
e39b128a7e2d94df9753a4a6beb2bd4bdac482d3
|
[
"Apache-2.0"
] |
permissive
|
pyca/pyopenssl
|
2c9467932c16ff532210a388a1526813ccd2b232
|
b259bfbd660f5680783e1abfab27ed0d5bf91c1a
|
refs/heads/main
| 2023-08-27T19:14:25.037914
| 2023-07-29T15:46:41
| 2023-07-29T15:46:41
| 15,778,059
| 829
| 436
|
Apache-2.0
| 2023-09-05T11:56:59
| 2014-01-09T20:10:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
debug.py
|
from __future__ import print_function
import ssl
import sys
import cffi
import cryptography
import OpenSSL.SSL
from . import version
_env_info = """\
pyOpenSSL: {pyopenssl}
cryptography: {cryptography}
cffi: {cffi}
cryptography's compiled against OpenSSL: {crypto_openssl_compile}
cryptography's linked OpenSSL: {crypto_openssl_link}
Python's OpenSSL: {python_openssl}
Python executable: {python}
Python version: {python_version}
Platform: {platform}
sys.path: {sys_path}""".format(
pyopenssl=version.__version__,
crypto_openssl_compile=OpenSSL._util.ffi.string(
OpenSSL._util.lib.OPENSSL_VERSION_TEXT,
).decode("ascii"),
crypto_openssl_link=OpenSSL.SSL.SSLeay_version(
OpenSSL.SSL.SSLEAY_VERSION
).decode("ascii"),
python_openssl=getattr(ssl, "OPENSSL_VERSION", "n/a"),
cryptography=cryptography.__version__,
cffi=cffi.__version__,
python=sys.executable,
python_version=sys.version,
platform=sys.platform,
sys_path=sys.path,
)
if __name__ == "__main__":
print(_env_info)
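# Typical invocation (given the __main__ guard above):
#
#   python -m OpenSSL.debug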
|
8938bcffb914ab6f27f002312d13f11dfd1020fe
|
7ebb2f0458d3813737dd045473d7c1398d08392d
|
/tests/test_generate_distance_matrix.py
|
f44ba7061b99d577852dd0d53b5949aa99a9e093
|
[
"Python-2.0",
"BSD-3-Clause"
] |
permissive
|
clEsperanto/pyclesperanto_prototype
|
b3192d6984f45571fe0a7dfcceee2058bc4debbe
|
b465c8669f8e9326874139cf4b9c9af22c22757c
|
refs/heads/master
| 2023-09-04T11:07:55.828329
| 2023-08-25T17:18:30
| 2023-08-25T17:18:30
| 248,206,619
| 152
| 36
|
BSD-3-Clause
| 2023-05-23T09:44:51
| 2020-03-18T10:56:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 981
|
py
|
test_generate_distance_matrix.py
|
import pyclesperanto_prototype as cle
import numpy as np
def test_generate_distance_matrix():
gpu_input = cle.push(np.asarray([
[0, 0, 0, 0, 0],
[0, 1, 0, 3, 0],
[0, 0, 0, 0, 0],
[0, 0, 2, 0, 0],
[0, 0, 0, 0, 4]
]))
gpu_reference = cle.push(np.asarray([
[0., 0. , 0. , 0. , 0. ],
[0., 0. , 2.236068 , 2. , 4.2426405 ],
[0., 2.236068 , 0. , 2.236068 , 2.236068 ],
[0., 2. , 2.236068 , 0. , 3.1622777 ],
[0., 4.2426405, 2.236068 , 3.1622777 , 0. ]
]))
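    # Reading the reference (explanatory note): row/column 0 correspond to the
    # background label; entry [i][j] is the Euclidean distance between spots
    # labelled i and j, e.g. labels 1 and 2 sit at (1, 1) and (3, 2), giving
    # sqrt(5) ~= 2.236068.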
gpu_pointlist = cle.labelled_spots_to_pointlist(gpu_input)
gpu_distance_matrix = cle.generate_distance_matrix(gpu_pointlist, gpu_pointlist)
a = cle.pull(gpu_distance_matrix)
b = cle.pull(gpu_reference)
print(a)
print(b)
assert (np.allclose(a, b, 0.001))
|
e03c1ea59fb37817bb8d9f67d83c8fb1ed7da1d4
|
a257e6ae61a5448d4ebff0c23de35cbaa2748c69
|
/basketball_reference_scraper/constants.py
|
c8d3c60747e2c429070bb5fc2b75fa0e38e2ef9b
|
[
"MIT"
] |
permissive
|
vishaalagartha/basketball_reference_scraper
|
ceed216682d853ec3a9e193977e4504187e4ed87
|
cbda552ac8818a1ddafed2d73726ea8de25b19c4
|
refs/heads/master
| 2023-05-26T14:07:20.895765
| 2022-08-19T17:15:56
| 2022-08-19T17:15:56
| 232,410,409
| 240
| 97
|
MIT
| 2023-09-06T16:05:28
| 2020-01-07T20:23:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,001
|
py
|
constants.py
|
TEAM_TO_TEAM_ABBR = {
'ATLANTA HAWKS': 'ATL',
'ST. LOUIS HAWKS': 'SLH',
'MILWAUKEE HAWKS': 'MIL',
'TRI-CITIES BLACKHAWKS': 'TCB',
'BOSTON CELTICS': 'BOS',
'BROOKLYN NETS': 'BRK',
'NEW JERSEY NETS' : 'NJN',
'NEW YORK NETS' : 'NYN',
'CHICAGO BULLS': 'CHI',
'CHARLOTTE HORNETS': 'CHO',
'CHARLOTTE BOBCATS' : 'CHA',
'CLEVELAND CAVALIERS': 'CLE',
'DALLAS MAVERICKS': 'DAL',
'DENVER NUGGETS': 'DEN',
'DETROIT PISTONS': 'DET',
'FORT WAYNE PISTONS': 'FWP',
'GOLDEN STATE WARRIORS': 'GSW',
'SAN FRANCISCO WARRIORS': 'SFW',
'PHILADELPHIA WARRIORS': 'PHI',
'HOUSTON ROCKETS': 'HOU',
'SAN DIEGO ROCKETS': 'HOU',
'INDIANA PACERS': 'IND',
'LOS ANGELES CLIPPERS': 'LAC',
'SAN DIEGO CLIPPERS': 'SDC',
'BUFFALO BRAVES': 'BUF',
'LOS ANGELES LAKERS': 'LAL',
'MINNEAPOLIS LAKERS': 'MIN',
'MEMPHIS GRIZZLIES': 'MEM',
'VANCOUVER GRIZZLIES' : 'VAN',
'MIAMI HEAT': 'MIA',
'MILWAUKEE BUCKS': 'MIL',
'MINNESOTA TIMBERWOLVES': 'MIN',
'NEW ORLEANS PELICANS' : 'NOP',
'NEW ORLEANS/OKLAHOMA CITY HORNETS' : 'NOK',
'NEW ORLEANS HORNETS' : 'NOH',
'NEW YORK KNICKS' : 'NYK',
'OKLAHOMA CITY THUNDER' : 'OKC',
'SEATTLE SUPERSONICS' : 'SEA',
'ORLANDO MAGIC' : 'ORL',
'PHILADELPHIA 76ERS' : 'PHI',
'SYRACUSE NATIONALS' : 'SYR',
'PHOENIX SUNS' : 'PHO',
'PORTLAND TRAIL BLAZERS' : 'POR',
'SACRAMENTO KINGS' : 'SAC',
'KANSAS CITY KINGS' : 'KCK',
'KANSAS CITY-OMAHA KINGS' : 'KCK',
'CINCINNATI ROYALS' : 'CIN',
'ROCHESTER ROYALS': 'ROR',
'SAN ANTONIO SPURS' : 'SAS',
'TORONTO RAPTORS' : 'TOR',
'UTAH JAZZ' : 'UTA',
'NEW ORLEANS JAZZ' : 'NOJ',
'WASHINGTON WIZARDS' : 'WAS',
'WASHINGTON BULLETS' : 'WAS',
'CAPITAL BULLETS' : 'CAP',
'BALTIMORE BULLETS' : 'BAL',
'CHICAGO ZEPHYRS' : 'CHI',
'CHICAGO PACKERS' : 'CHI',
# DEFUNCT FRANCHISES
'ANDERSON PACKERS': 'AND',
'CHICAGO STAGS': 'CHI',
'INDIANAPOLIS OLYMPIANS': 'IND',
'SHEBOYGAN RED SKINS': 'SRS',
'ST. LOUIS BOMBERS': 'SLB',
'WASHINGTON CAPITOLS' : 'WAS',
'WATERLOO HAWKS': 'WAT',
}
TEAM_SETS = [['STL', 'TRI', 'MLH', 'ATL'],
['BOS'],
['NJN', 'BRK', 'NYN', 'NJA', 'NYA'],
['CHO', 'CHA', 'CHH'],
['CHI'],
['CLE'],
['DAL'],
['DEN', 'DNR', 'DNA'],
['DET', 'FTW'],
['GSW', 'SFW', 'PHW'],
['SDR', 'HOU'],
['INA', 'IND'],
['SDC', 'LAC', 'BUF'],
['LAL', 'MNL'],
['MEM', 'VAN'],
['MIA'],
['MIL'],
['MIN'],
['NOP', 'NOH', 'NOK'],
['NYK'],
['SEA', 'OKC'],
['ORL'],
['PHI', 'SYR'],
['PHO'],
['POR'],
['CIN', 'SAC', 'KCO', 'KCK', 'ROC'],
['DLC', 'SAA', 'SAS', 'TEX'],
['TOR'],
['NOJ', 'UTA'],
['WSB', 'CHP', 'CAP', 'BAL', 'WAS', 'CHZ']]
|
0f38f3678de35a8d5dcbadd9772dd3792f47ecca
|
a29afc1d7342271ecfd2f4952c859a7a6e665a7a
|
/stellar_sdk/xdr/transaction_signature_payload_tagged_transaction.py
|
44b5d78e3f94353ff1a20fd7b9e0bb1ffbcdf5c2
|
[
"Apache-2.0"
] |
permissive
|
StellarCN/py-stellar-base
|
20252abb8ae90b20ac4d7a071046b52a8ccfb273
|
259ae05ca8155bd1e09fc5d83b8f6c6431eedf31
|
refs/heads/main
| 2023-09-02T17:18:18.158221
| 2023-07-21T02:47:10
| 2023-07-21T02:47:10
| 43,143,745
| 365
| 205
|
Apache-2.0
| 2023-09-14T02:08:18
| 2015-09-25T13:26:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,203
|
py
|
transaction_signature_payload_tagged_transaction.py
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib3 import Packer, Unpacker
from .envelope_type import EnvelopeType
from .fee_bump_transaction import FeeBumpTransaction
from .transaction import Transaction
__all__ = ["TransactionSignaturePayloadTaggedTransaction"]
class TransactionSignaturePayloadTaggedTransaction:
"""
XDR Source Code::
union switch (EnvelopeType type)
{
// Backwards Compatibility: Use ENVELOPE_TYPE_TX to sign ENVELOPE_TYPE_TX_V0
case ENVELOPE_TYPE_TX:
Transaction tx;
case ENVELOPE_TYPE_TX_FEE_BUMP:
FeeBumpTransaction feeBump;
}
"""
def __init__(
self,
type: EnvelopeType,
tx: Transaction = None,
fee_bump: FeeBumpTransaction = None,
) -> None:
self.type = type
self.tx = tx
self.fee_bump = fee_bump
def pack(self, packer: Packer) -> None:
self.type.pack(packer)
if self.type == EnvelopeType.ENVELOPE_TYPE_TX:
if self.tx is None:
raise ValueError("tx should not be None.")
self.tx.pack(packer)
return
if self.type == EnvelopeType.ENVELOPE_TYPE_TX_FEE_BUMP:
if self.fee_bump is None:
raise ValueError("fee_bump should not be None.")
self.fee_bump.pack(packer)
return
@classmethod
def unpack(
cls, unpacker: Unpacker
) -> "TransactionSignaturePayloadTaggedTransaction":
type = EnvelopeType.unpack(unpacker)
if type == EnvelopeType.ENVELOPE_TYPE_TX:
tx = Transaction.unpack(unpacker)
return cls(type=type, tx=tx)
if type == EnvelopeType.ENVELOPE_TYPE_TX_FEE_BUMP:
fee_bump = FeeBumpTransaction.unpack(unpacker)
return cls(type=type, fee_bump=fee_bump)
return cls(type=type)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(
cls, xdr: bytes
) -> "TransactionSignaturePayloadTaggedTransaction":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "TransactionSignaturePayloadTaggedTransaction":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.type == other.type
and self.tx == other.tx
and self.fee_bump == other.fee_bump
)
def __str__(self):
out = []
out.append(f"type={self.type}")
out.append(f"tx={self.tx}") if self.tx is not None else None
out.append(f"fee_bump={self.fee_bump}") if self.fee_bump is not None else None
return f"<TransactionSignaturePayloadTaggedTransaction [{', '.join(out)}]>"
|
b4a8d03fb418a1eb93cfc9ae61927996b88f3b91
|
306db13e7cbfb944323dca287cafe39440e28afa
|
/weasyl/frienduser.py
|
63319158c15a7ebee9e6d4884b51ac135973be11
|
[
"Apache-2.0"
] |
permissive
|
Weasyl/weasyl
|
3c03fed8ad9475c35725742c9d33589ac38832fc
|
dc63e71423e0be7efb3588152f980a7e3185ffb7
|
refs/heads/main
| 2023-08-11T07:33:16.351558
| 2023-08-05T02:28:25
| 2023-08-05T02:28:25
| 58,244,355
| 126
| 48
|
Apache-2.0
| 2023-09-12T02:31:05
| 2016-05-07T01:53:26
|
Python
|
UTF-8
|
Python
| false
| false
| 5,758
|
py
|
frienduser.py
|
import sqlalchemy as sa
from weasyl import define as d
from weasyl import ignoreuser
from weasyl import media
from weasyl import welcome
from weasyl.error import WeasylError
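# Note on the frienduser.settings column (editorial gloss, inferred from the
# queries below): a 'p' character flags a pending request, so settings !~ 'p'
# selects confirmed friendships and REPLACE(settings, 'p', '') accepts a
# request in place.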
def check(userid, otherid):
"""
Check whether two users are confirmed friends.
A user is considered their own friend.
"""
if not userid or not otherid:
return False
if userid == otherid:
return True
return d.engine.scalar(
"SELECT EXISTS (SELECT FROM frienduser"
" WHERE ((userid, otherid) = (%(user)s, %(other)s) OR (userid, otherid) = (%(other)s, %(user)s))"
" AND settings !~ 'p')",
user=userid,
other=otherid,
)
def already_pending(userid, otherid):
"""
Check whether a pending friend request exists from userid to otherid.
Does not find friend requests in the other direction. Returns False if the
two users are already confirmed friends.
"""
assert userid and otherid and userid != otherid
return d.engine.scalar(
"SELECT EXISTS (SELECT 0 FROM frienduser WHERE (userid, otherid) = (%(user)s, %(other)s) AND settings ~ 'p')",
user=userid,
other=otherid,
)
def has_friends(otherid):
return d.engine.scalar(
"SELECT EXISTS (SELECT 0 FROM frienduser WHERE %(user)s IN (userid, otherid) AND settings !~ 'p')",
user=otherid,
)
def select_friends(userid, otherid, limit=None, backid=None, nextid=None):
"""
Return accepted friends.
"""
fr = d.meta.tables['frienduser']
pr = d.meta.tables['profile']
iu = d.meta.tables['ignoreuser']
friends = sa.union(
(sa
.select([fr.c.otherid, pr.c.username, pr.c.config])
.select_from(fr.join(pr, fr.c.otherid == pr.c.userid))
.where(sa.and_(fr.c.userid == otherid, fr.c.settings.op('!~')('p')))),
(sa
.select([fr.c.userid, pr.c.username, pr.c.config])
.select_from(fr.join(pr, fr.c.userid == pr.c.userid))
.where(sa.and_(fr.c.otherid == otherid, fr.c.settings.op('!~')('p')))))
friends = friends.alias('friends')
query = sa.select(friends.c)
if userid:
query = query.where(
~friends.c.otherid.in_(sa.select([iu.c.otherid]).where(iu.c.userid == userid)))
if backid:
query = query.where(
friends.c.username < sa.select([pr.c.username]).where(pr.c.userid == backid).scalar_subquery())
elif nextid:
query = query.where(
friends.c.username > sa.select([pr.c.username]).where(pr.c.userid == nextid).scalar_subquery())
query = query.order_by(
friends.c.username.desc() if backid else friends.c.username.asc())
query = query.limit(limit)
db = d.connect()
query = [{
"userid": r.otherid,
"username": r.username,
} for r in db.execute(query)]
ret = query[::-1] if backid else query
media.populate_with_user_media(ret)
return ret
def select_accepted(userid):
result = []
query = d.execute(
"SELECT fr.userid, p1.username, fr.otherid, p2.username FROM frienduser fr"
" INNER JOIN profile p1 ON fr.userid = p1.userid"
" INNER JOIN profile p2 ON fr.otherid = p2.userid"
" WHERE %i IN (fr.userid, fr.otherid) AND fr.settings !~ 'p'"
" ORDER BY p1.username", [userid])
for i in query:
if i[0] != userid:
result.append({
"userid": i[0],
"username": i[1],
})
else:
result.append({
"userid": i[2],
"username": i[3],
})
media.populate_with_user_media(result)
return result
def select_requests(userid):
query = d.execute("SELECT fr.userid, pr.username, fr.settings FROM frienduser fr"
" INNER JOIN profile pr ON fr.userid = pr.userid"
" WHERE fr.otherid = %i AND fr.settings ~ 'p'", [userid])
ret = [{
"userid": i[0],
"username": i[1],
"settings": i[2],
} for i in query]
media.populate_with_user_media(ret)
return ret
def request(userid, otherid):
if ignoreuser.check(otherid, userid):
raise WeasylError("IgnoredYou")
elif ignoreuser.check(userid, otherid):
raise WeasylError("YouIgnored")
if already_pending(otherid, userid):
d.execute("UPDATE frienduser SET settings = REPLACE(settings, 'p', '') WHERE (userid, otherid) = (%i, %i)",
[otherid, userid])
welcome.frienduseraccept_insert(userid, otherid)
welcome.frienduserrequest_remove(userid, otherid)
elif not already_pending(userid, otherid):
d.execute("INSERT INTO frienduser VALUES (%i, %i)", [userid, otherid])
welcome.frienduserrequest_remove(userid, otherid)
welcome.frienduserrequest_insert(userid, otherid)
def accept(userid, otherid):
if check(userid, otherid):
raise WeasylError("Unexpected")
d.execute("UPDATE frienduser SET settings = REPLACE(settings, 'p', '')"
" WHERE (userid, otherid) = (%i, %i)", [otherid, userid])
welcome.frienduseraccept_insert(userid, otherid)
welcome.frienduserrequest_remove(userid, otherid)
def remove(userid, otherid):
d.execute("DELETE FROM frienduser WHERE userid IN (%i, %i) AND otherid IN (%i, %i)",
[userid, otherid, userid, otherid])
welcome.frienduseraccept_remove(userid, otherid)
welcome.frienduserrequest_remove(userid, otherid)
def remove_request(userid, otherid):
d.execute(
"DELETE FROM frienduser "
"WHERE userid IN (%i, %i) "
"AND otherid IN (%i, %i)",
[userid, otherid, userid, otherid])
welcome.frienduserrequest_remove(userid, otherid)
|
a82c438ad4e2fa84f9cb37a366ec6cd7f6134dae
|
c530897cb72b6943c7226b25824444cad5f3503b
|
/usaspending_api/download/v2/year_limited_downloads.py
|
65663153a72697d925f4c7b1b043abf04cd0da23
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/usaspending-api
|
fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
|
38f920438697930ae3ac57bbcaae9034877d8fb7
|
refs/heads/master
| 2023-09-01T22:00:36.633612
| 2023-08-29T18:39:18
| 2023-08-29T18:39:18
| 65,394,827
| 276
| 118
|
CC0-1.0
| 2023-09-14T20:33:15
| 2016-08-10T15:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
year_limited_downloads.py
|
from usaspending_api.download.v2.base_download_viewset import BaseDownloadViewSet
from usaspending_api.download.v2.request_validations import AwardDownloadValidator
class YearLimitedDownloadViewSet(BaseDownloadViewSet):
"""
This route sends a request to the backend to begin generating a zipfile of award data in CSV form for download.
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/bulk_download/awards.md"
def post(self, request):
request.data["constraint_type"] = "year"
return BaseDownloadViewSet.post(
self, request, origination="bulk_download", validator_type=AwardDownloadValidator
)
|
67285f1b29d78ecd6c225509f62129acbdaaa67b
|
9c774a31ff1e98a6366e71e54e84ea97e6f050a2
|
/tables/misc/proxydict.py
|
df288ef4cb3051cf1afb99c4706d4f758b976145
|
[
"BSD-3-Clause"
] |
permissive
|
PyTables/PyTables
|
65c355d47d68b5e8f4240fc7cc32906c3b6f2eea
|
f3817d7637b465de1a2ab5da9dffd3aba185c331
|
refs/heads/master
| 2023-08-30T02:46:42.212028
| 2023-08-18T05:32:06
| 2023-08-18T05:32:06
| 1,844,194
| 1,076
| 267
|
BSD-3-Clause
| 2023-09-09T06:56:39
| 2011-06-03T19:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
proxydict.py
|
"""Proxy dictionary for objects stored in a container."""
import weakref
class ProxyDict(dict):
"""A dictionary which uses a container object to store its values."""
def __init__(self, container):
self.containerref = weakref.ref(container)
"""A weak reference to the container object.
.. versionchanged:: 3.0
The *containerRef* attribute has been renamed into
*containerref*.
"""
def __getitem__(self, key):
if key not in self:
raise KeyError(key)
# Values are not actually stored to avoid extra references.
return self._get_value_from_container(self._get_container(), key)
def __setitem__(self, key, value):
# Values are not actually stored to avoid extra references.
super().__setitem__(key, None)
def __repr__(self):
return object.__repr__(self)
def __str__(self):
# C implementation does not use `self.__getitem__()`. :(
return '{' + ", ".join("{k!r}: {v!r}" for k, v in self.items()) + '}'
def values(self):
# C implementation does not use `self.__getitem__()`. :(
return [self[key] for key in self.keys()]
def itervalues(self):
# C implementation does not use `self.__getitem__()`. :(
for key in self.keys():
yield self[key]
def items(self):
# C implementation does not use `self.__getitem__()`. :(
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
# C implementation does not use `self.__getitem__()`. :(
for key in self.keys():
yield (key, self[key])
def _get_container(self):
container = self.containerref()
if container is None:
raise ValueError("the container object does no longer exist")
return container
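# Minimal usage sketch (illustrative, not part of PyTables): a subclass
# supplies _get_value_from_container() to resolve values lazily from the
# weakly-referenced container.
#
#   class AttrProxy(ProxyDict):
#       def _get_value_from_container(self, container, key):
#           return getattr(container, key)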
|
a121703a2f8d2f991e3c15bd59e475ea36603290
|
e62c8ee151671b999c6720ab8c2aa2f96c0d7f55
|
/src/dependency_injector/ext/__init__.py
|
26e0e04b3493eda2e2636ab07303926100280a66
|
[] |
permissive
|
ets-labs/python-dependency-injector
|
45645973456bb6494386ad12103d06e1f1be2cd8
|
cc2304e46e054ae08dc12995428759fbfb51af10
|
refs/heads/master
| 2023-08-23T03:59:53.509743
| 2022-12-19T03:14:24
| 2022-12-19T03:14:24
| 28,774,758
| 3,217
| 273
|
BSD-3-Clause
| 2023-09-08T21:46:18
| 2015-01-04T13:23:05
|
Python
|
UTF-8
|
Python
| false
| false
| 26
|
py
|
__init__.py
|
"""Extensions package."""
|
77b38ffb9f6aced0e09a097cefb4eb69c5d83e3b
|
1aa4a01014ff5408c8979d2ee91435515a376bcb
|
/src/ui/SWMM/Test_Run_SWMM.py
|
27003926b929c60ca2cd9c1a42500267979e7ebb
|
[] |
no_license
|
USEPA/SWMM-EPANET_User_Interface
|
49b41b27bfcf7a934203935ccac3cee2ed7c538c
|
d49a589fc923c716c9ff607228282073126ce6cc
|
refs/heads/dev-ui-py3qt5
| 2022-10-06T14:55:55.322050
| 2022-09-26T19:25:09
| 2022-09-26T19:25:09
| 48,242,880
| 121
| 77
| null | 2020-09-15T15:18:32
| 2015-12-18T15:41:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,886
|
py
|
Test_Run_SWMM.py
|
import os, sys
import sip
for typ in ["QString","QVariant", "QDate", "QDateTime", "QTextStream", "QTime", "QUrl"]:
sip.setapi(typ, 2)
from Externals.swmm.model.swmm5 import pyswmm
import Externals.swmm.outputapi.SMOutputSWIG as SMO
from core.project_base import ProjectBase
from core.swmm.swmm_project import SwmmProject as Project
from core.swmm.inp_reader_project import ProjectReader
from datetime import *
inp_file_name = r'C:\Data\SWMM\Example1.inp'
status_file_name = r'C:\Data\SWMM\Example1.rpt'
output_filename = r'C:\Data\SWMM\Example1.out'
model_path = r'..\..\Externals\swmm\model\swmm5_x64.dll'
model_api = pyswmm(inp_file_name, status_file_name, output_filename, model_path)
_last_displayed_days = -1
project = Project()
project_reader = ProjectReader()
project_reader.read_file(project, inp_file_name)
if project_reader.input_err_msg:
    print('problem reading input file: ' + project_reader.input_err_msg)
def compute_total_days(project):
# Compute the simulation duration in days from the Project's simulation options.
try:
end_date = datetime.strptime(project.options.dates.end_date + ' ' +
project.options.dates.end_time, "%m/%d/%Y %H:%M:%S")
start_date = datetime.strptime(project.options.dates.start_date + ' ' +
project.options.dates.start_time, "%m/%d/%Y %H:%M:%S")
return (end_date - start_date).days
    except Exception:
        return 0.0
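# For example, a run from 01/01/1998 00:00:00 to 01/02/1998 12:00:00 yields
# (end_date - start_date).days == 1, since timedelta.days truncates partial
# days.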
# self.add_map_constituents()
print("Running SWMM " + str(model_api.swmm_getVersion()))
model_api.swmm_run()
model_api.swmm_open()
model_api.swmm_start()
date_updated = datetime.now()
total_days = compute_total_days(project)
while model_api.errcode == 0:
elapsed_days = model_api.swmm_step()
if elapsed_days > 0:
if total_days:
date_now = datetime.now()
            if (date_now - date_updated).total_seconds() > 0.1:
# update_progress_days(elapsed_days, total_days)
# update_progress_bar(round(elapsed_days), total_days)
date_updated = date_now
else:
model_api.swmm_end()
break
ErrRunoff, ErrFlow, ErrQual = model_api.swmm_getMassBalErr()
if model_api.Errflag:
print("\n\nSWMM completed. There are errors.\n")
# set_status(RunStatus.rsError)
elif model_api.Warnflag:
print("\n\nSWMM completed. There are warnings.\n")
# set_status(RunStatus.rsWarning)
else:
print("\n\nSWMM completed.\n")
# set_status(RunStatus.rsSuccess)
if os.path.isfile(output_filename):
# model_api.swmm_end()
# self.model_api.swmm_close()
# self.model_api = None
# QMessageBox.information(None, "SWMM", "pause", QMessageBox.Ok)
try:
output = SMO.SwmmOutputObject(output_filename)
output.build_units_dictionary()
print("output file is good")
    except Exception as ex:
        print("problem accessing output: {0}".format(ex))
|
788813c1a36f6cf7812b7e3784fd2a7d62e06594
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CommonTools/ParticleFlow/python/GeneratorTools/sortGenParticles_cff.py
|
22d1cc0b892f5a0751223f10fb84f51ca2949921
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
sortGenParticles_cff.py
|
import FWCore.ParameterSet.Config as cms
from SimGeneral.HepPDTESSource.pythiapdt_cfi import *
decaysFromZs = cms.EDProducer(
"GenParticlePruner",
src = cms.InputTag("genParticles"),
select = cms.vstring(
"drop * ", # this is the default
"keep+ pdgId = {Z0}",
"drop pdgId = {Z0}"
)
)
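# Reading the cuts above (sketch of intent): "keep+" retains each Z0 together
# with all of its descendants, and the final "drop" then removes the Z0
# itself, so only the decay products survive the pruner.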
sortGenParticlesSequence = cms.Sequence(
decaysFromZs
)
|
4efd5eeb72080492bba55ecc09f37382497f9cc6
|
6415c13547e6943f7b65337cbd2790c4e18723c8
|
/netbox/core/api/serializers.py
|
4117a609cb6043da83a936a6283f9d60063498ed
|
[
"Apache-2.0"
] |
permissive
|
netbox-community/netbox
|
287254a9698270d51f57b1297118e9f01536da5a
|
506884bc4dc70299db3e2a7ad577dd7fd808065e
|
refs/heads/develop
| 2023-08-24T09:11:46.685121
| 2023-08-23T18:44:14
| 2023-08-23T18:44:14
| 52,796,596
| 8,122
| 1,817
|
Apache-2.0
| 2023-09-14T18:16:01
| 2016-02-29T14:15:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
serializers.py
|
from rest_framework import serializers
from core.choices import *
from core.models import *
from netbox.api.fields import ChoiceField, ContentTypeField
from netbox.api.serializers import BaseModelSerializer, NetBoxModelSerializer
from users.api.nested_serializers import NestedUserSerializer
from .nested_serializers import *
__all__ = (
'DataFileSerializer',
'DataSourceSerializer',
'JobSerializer',
)
class DataSourceSerializer(NetBoxModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='core-api:datasource-detail'
)
type = ChoiceField(
choices=DataSourceTypeChoices
)
status = ChoiceField(
choices=DataSourceStatusChoices,
read_only=True
)
# Related object counts
file_count = serializers.IntegerField(
read_only=True
)
class Meta:
model = DataSource
fields = [
'id', 'url', 'display', 'name', 'type', 'source_url', 'enabled', 'status', 'description', 'comments',
'parameters', 'ignore_rules', 'created', 'last_updated', 'file_count',
]
class DataFileSerializer(NetBoxModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='core-api:datafile-detail'
)
source = NestedDataSourceSerializer(
read_only=True
)
class Meta:
model = DataFile
fields = [
'id', 'url', 'display', 'source', 'path', 'last_updated', 'size', 'hash',
]
class JobSerializer(BaseModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='core-api:job-detail')
user = NestedUserSerializer(
read_only=True
)
status = ChoiceField(choices=JobStatusChoices, read_only=True)
object_type = ContentTypeField(
read_only=True
)
class Meta:
model = Job
fields = [
'id', 'url', 'display', 'object_type', 'object_id', 'name', 'status', 'created', 'scheduled', 'interval',
'started', 'completed', 'user', 'data', 'job_id',
]
|
93f1c3cc7c8d673714fb5b8ff6bb5ef6e39c1a0d
|
7af0ff378525ef6132f74bac0b1eb54ce4c40c08
|
/indico/core/plugins/controllers.py
|
3103f41cd726e9349eda36a98fe9dded6377a2c2
|
[
"MIT"
] |
permissive
|
indico/indico
|
1126ee0ac3e9d36510a64989ce71be9c02680831
|
463951511d3a8409f944f98f29875c4323f3e897
|
refs/heads/master
| 2023-08-31T11:15:00.092526
| 2023-08-30T11:07:25
| 2023-08-30T11:07:25
| 2,113,067
| 1,549
| 429
|
MIT
| 2023-09-13T20:09:56
| 2011-07-27T13:56:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,589
|
py
|
controllers.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from operator import attrgetter
from flask import flash, request
from werkzeug.exceptions import NotFound
from indico.core.plugins import PluginCategory, plugin_engine
from indico.core.plugins.views import WPPlugins
from indico.modules.admin import RHAdminBase
from indico.util.i18n import _
from indico.web.flask.util import redirect_or_jsonify, url_for
from indico.web.forms.base import FormDefaults
class RHPluginsBase(RHAdminBase):
pass
class RHPlugins(RHPluginsBase):
def _process(self):
        plugins = list(plugin_engine.get_active_plugins().values())
categories = defaultdict(list)
other = []
for plugin in plugins:
if plugin.category:
categories[plugin.category].append(plugin)
else:
other.append(plugin)
# Sort the plugins of each category in alphabetic order and in a way that the internal plugins are always
# listed in the front
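        # attrgetter('configurable', 'title') sorts False before True, so
        # non-configurable (internal) plugins come first, each group ordered
        # alphabetically by title.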
for category in categories:
categories[category].sort(key=attrgetter('configurable', 'title'))
ordered_categories = dict(sorted(categories.items()))
if other:
ordered_categories[PluginCategory.other] = sorted(other, key=attrgetter('configurable', 'title'))
return WPPlugins.render_template('index.html', categorized_plugins=ordered_categories)
class RHPluginDetails(RHPluginsBase):
back_button_endpoint = 'plugins.index'
def _process_args(self):
self.plugin = plugin_engine.get_plugin(request.view_args['plugin'])
if not self.plugin or not self.plugin.configurable:
raise NotFound
def _process(self):
plugin = self.plugin
form = None
with plugin.plugin_context():
if plugin.settings_form:
defaults = FormDefaults(**plugin.settings.get_all())
form = plugin.settings_form(obj=defaults)
if form.validate_on_submit():
plugin.settings.set_multi(form.data)
flash(_('Settings saved ({0})').format(plugin.title), 'success')
return redirect_or_jsonify(request.url)
return WPPlugins.render_template('details.html', plugin=plugin, form=form,
back_url=url_for(self.back_button_endpoint))
|
1ff3f70fc354a11b6cc4c0520c9d8e3fdfc2f231
|
6c88b2cea38b2cead9e2402d46a8fc64949c53df
|
/pkg/codegen/testing/test/testdata/output-funcs/python/pulumi_mypkg/list_storage_account_keys.py
|
823548345bece10124e2a2e9738625a1b3459f39
|
[
"Apache-2.0"
] |
permissive
|
pulumi/pulumi
|
a9b36c32f0cdd445c22f9ca64ce26c9ae5147575
|
46e2753d02d46a1c077930eeccdfe6738f46c0d2
|
refs/heads/master
| 2023-08-19T10:25:49.849189
| 2023-08-16T04:59:07
| 2023-08-16T04:59:07
| 72,477,752
| 17,553
| 1,082
|
Apache-2.0
| 2023-09-14T21:05:35
| 2016-10-31T21:02:47
|
Go
|
UTF-8
|
Python
| false
| false
| 3,628
|
py
|
list_storage_account_keys.py
|
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'ListStorageAccountKeysResult',
'AwaitableListStorageAccountKeysResult',
'list_storage_account_keys',
'list_storage_account_keys_output',
]
@pulumi.output_type
class ListStorageAccountKeysResult:
"""
The response from the ListKeys operation.
"""
def __init__(__self__, keys=None):
if keys and not isinstance(keys, list):
raise TypeError("Expected argument 'keys' to be a list")
pulumi.set(__self__, "keys", keys)
@property
@pulumi.getter
def keys(self) -> Sequence['outputs.StorageAccountKeyResponse']:
"""
Gets the list of storage account keys and their properties for the specified storage account.
"""
return pulumi.get(self, "keys")
class AwaitableListStorageAccountKeysResult(ListStorageAccountKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListStorageAccountKeysResult(
keys=self.keys)
def list_storage_account_keys(account_name: Optional[str] = None,
expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListStorageAccountKeysResult:
"""
The response from the ListKeys operation.
API Version: 2021-02-01.
:param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param str expand: Specifies type of the key to be listed. Possible value is kerb.
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('mypkg::listStorageAccountKeys', __args__, opts=opts, typ=ListStorageAccountKeysResult).value
return AwaitableListStorageAccountKeysResult(
keys=pulumi.get(__ret__, 'keys'))
@_utilities.lift_output_func(list_storage_account_keys)
def list_storage_account_keys_output(account_name: Optional[pulumi.Input[str]] = None,
expand: Optional[pulumi.Input[Optional[str]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListStorageAccountKeysResult]:
"""
The response from the ListKeys operation.
API Version: 2021-02-01.
:param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param str expand: Specifies type of the key to be listed. Possible value is kerb.
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
...
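# Illustrative usage (account and resource-group names are hypothetical):
#
#   result = list_storage_account_keys(
#       account_name='mystorageacct',
#       resource_group_name='my-rg')
#   first_key = result.keys[0]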
|
a288d7dcdb592a42ed18eacfa54ae6869bb54fa7
|
e7bf1ff05319acc59bba5af5890041bd82c3e197
|
/mne/_fiff/tests/test_proc_history.py
|
185537d33c0cd9213f9e97f5ecd332632e908f94
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mne-tools/mne-python
|
7e8d7e945dfbbee6432a4955cf050fa823f2d34b
|
f44636f00666b8eb869417960926d01690ff4f42
|
refs/heads/main
| 2023-09-04T03:05:37.402100
| 2023-09-03T14:15:18
| 2023-09-03T14:15:18
| 1,301,584
| 2,437
| 1,418
|
BSD-3-Clause
| 2023-09-14T19:23:38
| 2011-01-28T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
test_proc_history.py
|
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# License: Simplified BSD
from pathlib import Path
import numpy as np
from numpy.testing import assert_array_equal
from mne.io import read_info
from mne._fiff.constants import FIFF
base_dir = Path(__file__).parent.parent.parent / "io" / "tests" / "data"
raw_fname = base_dir / "test_chpi_raw_sss.fif"
def test_maxfilter_io():
"""Test maxfilter io."""
info = read_info(raw_fname)
mf = info["proc_history"][1]["max_info"]
assert mf["sss_info"]["frame"] == FIFF.FIFFV_COORD_HEAD
# based on manual 2.0, rev. 5.0 page 23
assert 5 <= mf["sss_info"]["in_order"] <= 11
assert mf["sss_info"]["out_order"] <= 5
assert mf["sss_info"]["nchan"] > len(mf["sss_info"]["components"])
assert (
info["ch_names"][: mf["sss_info"]["nchan"]] == mf["sss_ctc"]["proj_items_chs"]
)
assert mf["sss_ctc"]["decoupler"].shape == (
mf["sss_info"]["nchan"],
mf["sss_info"]["nchan"],
)
assert_array_equal(
np.unique(np.diag(mf["sss_ctc"]["decoupler"].toarray())),
np.array([1.0], dtype=np.float32),
)
assert mf["sss_cal"]["cal_corrs"].shape == (306, 14)
assert mf["sss_cal"]["cal_chans"].shape == (306, 2)
vv_coils = [v for k, v in FIFF.items() if "FIFFV_COIL_VV" in k]
assert all(k in vv_coils for k in set(mf["sss_cal"]["cal_chans"][:, 1]))
|
32df857d03e714de28d40edc4ac961eb77e56d87
|
67cc5db4593e2cdd109e589e13fb07074bcff5d9
|
/tests/transformations/gpu_grid_stride_tiling_test.py
|
4418054cc9ae5025af4189781fd6d9d8f92ac22c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/dace
|
39849b1488e8f59f880fc0e2572687556c51847d
|
c5ca99ad37e7ceef6da71026c3c8bb579f64117f
|
refs/heads/master
| 2023-08-31T10:45:09.480018
| 2023-08-30T06:05:10
| 2023-08-30T06:05:10
| 172,703,996
| 402
| 114
|
BSD-3-Clause
| 2023-09-14T15:18:29
| 2019-02-26T12:05:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,590
|
py
|
gpu_grid_stride_tiling_test.py
|
# Copyright 2019-2023 ETH Zurich and the DaCe authors. All rights reserved.
"""Tests for GPU grid-strided tiling transformation."""
from typing import List, Tuple
import pytest
import dace
from dace.transformation.dataflow import TrivialTaskletElimination, GPUGridStridedTiling
import numpy as np
import scipy.sparse as sparse
def find_map_entry(sdfg: dace.SDFG, map_name_list: List[str]) -> Tuple[dace.sdfg.nodes.MapEntry]:
    """Return the MapEntry node(s) whose first map parameter matches each name in map_name_list."""
if isinstance(map_name_list, str):
map_name_list = [
map_name_list,
]
ret_list = [None] * len(map_name_list)
for state in sdfg.states():
for node in state.nodes():
if isinstance(node, dace.sdfg.nodes.MapEntry):
for i, map_name in enumerate(map_name_list):
if map_name == node.map.params[0]:
ret_list[i] = node
# check if all map entries are found
assert all([x is not None for x in ret_list])
# unpack if only one map entry is found
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list)
@pytest.mark.gpu
def test_gpu_grid_stride_tiling():
M = 300
N = 300
@dace.program
def dummy(A: dace.float32[M, N], B: dace.float32[M, N]):
for i in dace.map[0:M]:
for j in dace.map[0:N]:
A[i, j] = B[i, j] + 1.0
sdfg = dummy.to_sdfg()
sdfg.simplify()
ime, jme = find_map_entry(sdfg, ["i", "j"])
sdfg.apply_transformations_repeated(TrivialTaskletElimination)
sdfg.apply_gpu_transformations()
GPUGridStridedTiling.apply_to(sdfg, outer_map_entry=ime, inner_map_entry=jme)
sdfg.validate()
B = np.random.rand(M, N).astype(np.float32)
A_ref = np.zeros((M, N), dtype=np.float32)
A_test = np.zeros((M, N), dtype=np.float32)
A_ref = B + 1.0
sdfg(A=A_test, B=B)
assert np.allclose(A_ref, A_test)
@pytest.mark.gpu
def test_gpu_grid_stride_tiling_with_indirection():
M = 300
N = 300
K = 300
density = 0.01
dtype = np.float32
A = sparse.random(M, N, density=density, format='csr', dtype=dtype)
nnz = A.nnz
B = np.random.rand(M, K).astype(dtype)
C = np.random.rand(K, N).astype(dtype)
D_test = np.zeros_like(A.data)
D_ref = np.zeros_like(A.data)
@dace.program
def sddmm(D_vals: dace.float32[nnz], A2_crd: dace.int32[nnz], A2_pos: dace.int32[M + 1], A_vals: dace.float32[nnz],
B: dace.float32[M, K], C: dace.float32[K, N]):
for i in dace.map[0:M]:
for j in dace.map[A2_pos[i]:A2_pos[i + 1]]:
for k in dace.map[0:K]:
D_vals[j] += A_vals[j] * B[i, k] * C[k, A2_crd[j]]
sdfg = sddmm.to_sdfg()
sdfg.simplify()
ime, jme, _ = find_map_entry(sdfg, ["i", "j", "k"])
sdfg.apply_transformations_repeated(TrivialTaskletElimination)
sdfg.apply_gpu_transformations()
GPUGridStridedTiling.apply_to(sdfg, outer_map_entry=ime, inner_map_entry=jme)
for e, _ in sdfg.all_edges_recursive():
if isinstance(e.data, dace.Memlet) and e.data.wcr:
e.data.wcr_nonatomic = True
sdfg.validate()
# reference
for i in range(M):
for j in range(A.indptr[i], A.indptr[i + 1]):
D_ref[j] += A.data[j] * (B[i, :] @ C[:, A.indices[j]])
sdfg(A_vals=np.copy(A.data),
A2_crd=np.copy(A.indices),
A2_pos=A.indptr,
B=B,
C=C,
D_vals=D_test)
assert np.allclose(D_ref, D_test)
if __name__ == '__main__':
test_gpu_grid_stride_tiling()
test_gpu_grid_stride_tiling_with_indirection()
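For readers unfamiliar with the pattern being tested: grid-strided execution lets a fixed number of GPU threads cover an arbitrarily large iteration space by striding. A conceptual pure-Python sketch follows (editorial, not the DaCe transformation itself; all names are invented):

def grid_stride_indices(total_work, num_threads, thread_id):
    # Thread `thread_id` visits thread_id, thread_id + num_threads, ...
    # so a fixed-size grid covers any input size.
    return range(thread_id, total_work, num_threads)

# Four "threads" cover all ten indices exactly once:
covered = sorted(i for t in range(4) for i in grid_stride_indices(10, 4, t))
assert covered == list(range(10))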
|
c2b94179d4c6e64f4d6dfdc5fa5b07dd239ff870
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-core/huaweicloudsdkcore/signer/hkdf.py
|
55ce03e8d6145196742784f8274ebac86517ec95
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,711
|
py
|
hkdf.py
|
# coding: utf-8
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from __future__ import absolute_import
import hashlib
import math
import hmac
import six
import copy
import binascii
from huaweicloudsdkcore.exceptions.exceptions import SdkException
def get_der_key_sha256(access_key, secret_key, info):
    if not access_key or not secret_key:
        return None
    # Extract a pseudo-random key from the secret, then expand it with `info`.
    tmp_key = extract(secret_key, access_key, HMAC_ALGORITHM)
    der_secret_key = expand(tmp_key, info.encode(UTF_8), HMAC_ALGORITHM, DERIVATION_KEY_LENGTH, EXPAND_CEIL)
    if der_secret_key is not None:
        return binascii.hexlify(der_secret_key).decode()
    return None
def get_hash_len(hmac_algorithm):
if hmac_algorithm == HMAC_SHA1:
return 20
elif hmac_algorithm == HMAC_SHA256:
return 32
else:
return 32
def extract(ikm, salt, hmac_algorithm):
if not salt:
salt = bytes(get_hash_len(hmac_algorithm)).decode(encoding=UTF_8)
return hmac_sha256(salt, ikm, hmac_algorithm)
def expand(prk, info, hmac_algorithm, okm_len, ceil):
if ceil == 1:
raw_result = expand_first(prk, info, hmac_algorithm)
else:
raw_result = bytes()
tmp = bytes()
for i in range(1, ceil + 1):
tmp = expand_once(prk, info, tmp, i, hmac_algorithm)
raw_result = raw_result + tmp
if okm_len == len(raw_result):
return raw_result
elif okm_len < len(raw_result):
        return raw_result[:okm_len]  # truncate to exactly okm_len bytes
else:
return None
def expand_first(prk, info, hmac_algorithm):
result = copy.deepcopy(info)
result = result + bytearray((1,))
return hmac_sha256(prk, result, hmac_algorithm)
def expand_once(prk, info, pre_tmp, i, hmac_algorithm):
result = pre_tmp + info + bytearray((i,))
return hmac_sha256(prk, result, hmac_algorithm)
def _get_expand_ceil(derivation_key_len, algorithm_hash_len):
try:
return int(math.ceil(float(derivation_key_len) / float(algorithm_hash_len)))
except ZeroDivisionError as e:
raise ValueError(e)
HMAC_SHA1 = hashlib.sha1
HMAC_SHA256 = hashlib.sha256
DERIVATION_KEY_LENGTH = 32
HMAC_ALGORITHM = HMAC_SHA256
ALGORITHM_HASH_LENGTH = get_hash_len(HMAC_ALGORITHM)
UTF_8 = "utf-8"
EXPAND_CEIL = _get_expand_ceil(DERIVATION_KEY_LENGTH, ALGORITHM_HASH_LENGTH)
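# Editorial note: with the constants above, EXPAND_CEIL = ceil(32 / 32) = 1, so
# expand() always takes the single-block expand_first() path and returns
# exactly DERIVATION_KEY_LENGTH (32) bytes.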
if six.PY2:
def hmac_sha256(key_byte, message, hmac_algorithm):
return hmac.new(key_byte, message, digestmod=hmac_algorithm).digest()
else:
def hmac_sha256(key_byte, message, hmac_algorithm):
if isinstance(key_byte, str) and isinstance(message, str):
return hmac.new(key_byte.encode(UTF_8), message.encode(UTF_8), digestmod=hmac_algorithm).digest()
elif isinstance(key_byte, bytes) and isinstance(message, bytes):
return hmac.new(key_byte, message, digestmod=hmac_algorithm).digest()
else:
raise SdkException()
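A hedged usage sketch of the derivation entry point (the credential strings and info value below are placeholders, not real credentials):

demo_key = get_der_key_sha256(
    access_key="EXAMPLEACCESSKEY",  # placeholder access key id
    secret_key="examplesecret",     # placeholder secret key
    info="2021-01-01")              # placeholder derivation context
assert demo_key is not None and len(demo_key) == 64  # 32 derived bytes, hex-encoded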
|
9b313b94f437ee01729c18252e147a976743f6d7
|
e03bce53de6f88c0e09f56e4fe11c36af0f1161f
|
/runway/variables.py
|
37a1f34bbcfd6c78ebe7a81f44ffad237e4fe0e2
|
[
"Apache-2.0"
] |
permissive
|
onicagroup/runway
|
20c31df9cbc1a1ffc5c9aa468ce5cf7d6ac7899f
|
0763b06aee07d2cf3f037a49ca0cb81a048c5deb
|
refs/heads/master
| 2023-08-30T22:35:54.113981
| 2023-08-29T14:13:35
| 2023-08-29T14:13:35
| 122,529,924
| 156
| 79
|
Apache-2.0
| 2023-09-13T13:43:50
| 2018-02-22T20:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 29,955
|
py
|
variables.py
|
"""Runway variables."""
from __future__ import annotations
import logging
import re
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
Iterable,
Iterator,
List,
MutableMapping,
MutableSequence,
Optional,
Set,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from typing_extensions import Literal
from .cfngin.lookups.registry import CFNGIN_LOOKUP_HANDLERS
from .exceptions import (
FailedLookup,
FailedVariableLookup,
InvalidLookupConcatenation,
UnknownLookupType,
UnresolvedVariable,
UnresolvedVariableValue,
)
from .lookups.handlers.base import LookupHandler
from .lookups.registry import RUNWAY_LOOKUP_HANDLERS
if TYPE_CHECKING:
from .cfngin.providers.aws.default import Provider
from .config.components.runway import RunwayVariablesDefinition
from .context import CfnginContext, RunwayContext
LOGGER = logging.getLogger(__name__)
_LiteralValue = TypeVar("_LiteralValue", int, str)
_PydanticModelTypeVar = TypeVar("_PydanticModelTypeVar", bound=BaseModel)
VariableTypeLiteralTypeDef = Literal["cfngin", "runway"]
class Variable:
"""Represents a variable provided to a Runway directive."""
name: str
def __init__(
self,
name: str,
value: Any,
variable_type: VariableTypeLiteralTypeDef = "cfngin",
) -> None:
"""Initialize class.
Args:
name: Name of the variable (directive/key).
value: The variable itself.
variable_type: Type of variable (cfngin|runway).
"""
self.name = name
self._raw_value = value
self._value = VariableValue.parse_obj(value, variable_type)
self.variable_type = variable_type
@property
def dependencies(self) -> Set[str]:
"""Stack names that this variable depends on.
Returns:
Set[str]: Stack names that this variable depends on.
"""
return self._value.dependencies
@property
def resolved(self) -> bool:
"""Boolean for whether the Variable has been resolved.
Variables only need to be resolved if they contain lookups.
"""
return self._value.resolved
@property
def value(self) -> Any:
"""Return the current value of the Variable.
Raises:
            UnresolvedVariable: Value accessed before it has been resolved.
"""
try:
return self._value.value
except UnresolvedVariableValue:
raise UnresolvedVariable(self) from None
def resolve(
self,
context: Union[CfnginContext, RunwayContext],
provider: Optional[Provider] = None,
variables: Optional[RunwayVariablesDefinition] = None,
**kwargs: Any,
) -> None:
"""Resolve the variable value.
Args:
context: The current context object.
provider: Subclass of the base provider.
variables: Object containing variables passed to Runway.
Raises:
FailedVariableLookup
"""
try:
self._value.resolve(
context, provider=provider, variables=variables, **kwargs
)
except FailedLookup as err:
raise FailedVariableLookup(self, err) from err.cause
def get(self, key: str, default: Any = None) -> Any:
"""Implement evaluation of self.get.
Args:
key: Attribute name to return the value for.
default: Value to return if attribute is not found.
"""
return getattr(self.value, key, default)
def __repr__(self) -> str:
"""Return object representation."""
return f"Variable[{self.name}={self._raw_value}]"
def resolve_variables(
variables: List[Variable],
context: Union[CfnginContext, RunwayContext],
provider: Optional[Provider] = None,
) -> None:
"""Given a list of variables, resolve all of them.
Args:
variables: List of variables.
context: CFNgin context.
provider: Subclass of the base provider.
"""
for variable in variables:
variable.resolve(context=context, provider=provider)
_VariableValue = TypeVar("_VariableValue", bound="VariableValue")
class VariableValue:
"""Syntax tree base class to parse variable values."""
_resolved: bool = False
_data: Any
variable_type: VariableTypeLiteralTypeDef
@property
def dependencies(self) -> Set[Any]:
"""Stack names that this variable depends on."""
return set()
@property
def resolved(self) -> bool:
"""Use to check if the variable value has been resolved.
Raises:
NotImplementedError: Should be defined in a subclass.
"""
raise NotImplementedError
@property
def simplified(self) -> Any:
"""Return a simplified version of the value.
This can be used to concatenate two literals into one literal or
flatten nested concatenations.
        Should be implemented in subclasses where applicable.
"""
return self
@property
def value(self) -> Any:
"""Value of the variable. Can be resolved or unresolved.
Raises:
NotImplementedError: Should be defined in a subclass.
"""
raise NotImplementedError
def resolve(
self,
context: Union[CfnginContext, RunwayContext],
provider: Optional[Provider] = None,
variables: Optional[RunwayVariablesDefinition] = None,
**kwargs: Any,
) -> None:
"""Resolve the variable value.
Args:
context: The current context object.
provider: Subclass of the base provider.
variables: Object containing variables passed to Runway.
"""
def _resolve(self, value: Any) -> None:
"""Set _value and _resolved from the result of resolve().
Args:
value: Resolved value of the variable.
"""
self._data = value
self._resolved = True
@overload
@classmethod
def parse_obj(
cls, obj: _PydanticModelTypeVar, variable_type: VariableTypeLiteralTypeDef = ...
) -> VariableValuePydanticModel[_PydanticModelTypeVar]:
...
@overload
@classmethod
def parse_obj(
cls, obj: Dict[str, Any], variable_type: VariableTypeLiteralTypeDef = ...
) -> VariableValue:
...
@overload
@classmethod
def parse_obj(
cls, obj: List[Any], variable_type: VariableTypeLiteralTypeDef = ...
) -> VariableValueList:
...
@overload
@classmethod
def parse_obj(
cls, obj: int, variable_type: VariableTypeLiteralTypeDef = ...
) -> VariableValueLiteral[int]:
...
@overload
@classmethod
def parse_obj(
cls, obj: str, variable_type: VariableTypeLiteralTypeDef = ...
) -> VariableValueConcatenation[
Union[VariableValueLiteral[str], VariableValueLookup]
]:
...
@classmethod
def parse_obj(
cls, obj: Any, variable_type: VariableTypeLiteralTypeDef = "cfngin"
) -> VariableValue:
"""Parse complex variable structures using type appropriate subclasses.
Args:
obj: The objected defined as the value of a variable.
variable_type: Type of variable (cfngin|runway).
"""
if isinstance(obj, BaseModel):
return VariableValuePydanticModel(obj, variable_type=variable_type) # type: ignore
if isinstance(obj, dict):
return VariableValueDict(obj, variable_type=variable_type) # type: ignore
if isinstance(obj, list):
return VariableValueList(obj, variable_type=variable_type) # type: ignore
if not isinstance(obj, str):
return VariableValueLiteral(obj, variable_type=variable_type) # type: ignore
tokens: VariableValueConcatenation[
Union[VariableValueLiteral[str], VariableValueLookup]
] = VariableValueConcatenation(
# pyright 1.1.138 is having issues properly inferring the type from comprehension
[ # type: ignore
VariableValueLiteral(cast(str, t), variable_type=variable_type)
                for t in re.split(r"(\$\{|\}|\s+)", obj)  # split on "${", "}", or whitespace
]
)
opener = "${"
closer = "}"
while True:
last_open = None
next_close = None
for i, tok in enumerate(tokens):
if not isinstance(tok, VariableValueLiteral):
continue
if tok.value == opener:
last_open = i
next_close = None
if last_open is not None and tok.value == closer and next_close is None:
next_close = i
if next_close is not None:
lookup_query = VariableValueConcatenation(
tokens[(cast(int, last_open) + len(opener) + 1) : next_close],
variable_type=variable_type,
)
lookup = VariableValueLookup(
lookup_name=tokens[cast(int, last_open) + 1], # type: ignore
lookup_query=lookup_query,
variable_type=variable_type,
)
tokens[last_open : (next_close + 1)] = [lookup] # type: ignore
else:
break # cov: ignore
return tokens.simplified
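    # Illustration (editorial sketch): for "prefix-${output stack.Name}",
    # re.split above yields ['prefix-', '${', 'output', ' ', 'stack.Name', '}', '']
    # and the loop folds the "${" ... "}" span into one VariableValueLookup with
    # lookup name "output" and query "stack.Name", leaving a concatenation of
    # the literal prefix and the lookup.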
def __iter__(self) -> Iterator[Any]:
"""How the object is iterated.
Raises:
NotImplementedError: Should be defined in a subclass.
"""
raise NotImplementedError
def __repr__(self) -> str:
"""Return object representation.
Raises:
NotImplementedError: Should be defined in a subclass.
"""
raise NotImplementedError
class VariableValueDict(VariableValue, MutableMapping[str, VariableValue]):
"""A dict variable value."""
def __init__(
self, data: Dict[str, Any], variable_type: VariableTypeLiteralTypeDef = "cfngin"
) -> None:
"""Instantiate class.
Args:
data: Data to be stored in the object.
variable_type: Type of variable (cfngin|runway).
"""
self._data = {
k: self.parse_obj(v, variable_type=variable_type) for k, v in data.items()
}
self.variable_type: VariableTypeLiteralTypeDef = variable_type
@property
def dependencies(self) -> Set[str]:
"""Stack names that this variable depends on."""
deps: Set[str] = set()
for item in self.values():
deps.update(item.dependencies)
return deps
@property
def resolved(self) -> bool:
"""Use to check if the variable value has been resolved."""
accumulator: bool = True
for item in self.values():
accumulator = accumulator and item.resolved
return accumulator
@property
def simplified(self) -> Dict[str, Any]:
"""Return a simplified version of the value.
This can be used to concatenate two literals into one literal or
flatten nested concatenations.
"""
return {k: v.simplified for k, v in self.items()}
@property
def value(self) -> Dict[str, Any]:
"""Value of the variable. Can be resolved or unresolved."""
return {k: v.value for k, v in self.items()}
def resolve(
self,
context: Union[CfnginContext, RunwayContext],
provider: Optional[Provider] = None,
variables: Optional[RunwayVariablesDefinition] = None,
**kwargs: Any,
) -> None:
"""Resolve the variable value.
Args:
context: The current context object.
provider: Subclass of the base provider.
variables: Object containing variables passed to Runway.
"""
for item in self.values():
item.resolve(context, provider=provider, variables=variables, **kwargs)
def __delitem__(self, __key: str) -> None:
"""Delete item by index."""
del self._data[__key]
def __getitem__(self, __key: str) -> VariableValue:
"""Get item by index."""
return self._data[__key]
def __iter__(self) -> Iterator[str]:
"""How the object is iterated."""
yield from iter(self._data)
def __len__(self) -> int:
"""Length of the object."""
return len(self._data)
def __repr__(self) -> str:
"""Return object representation."""
return f"Dict[{', '.join(f'{k}={v}' for k, v in self.items())}]"
def __setitem__(self, __key: str, __value: VariableValue) -> None:
"""Set item by index."""
self._data[__key] = __value
class VariableValueList(VariableValue, MutableSequence[VariableValue]):
"""List variable value."""
def __init__(
self,
iterable: Iterable[Any],
variable_type: VariableTypeLiteralTypeDef = "cfngin",
) -> None:
"""Instantiate class.
Args:
iterable: Data to store in the iterable.
variable_type: Type of variable (cfngin|runway).
"""
self._data: List[VariableValue] = [
self.parse_obj(i, variable_type=variable_type) for i in iterable
]
self.variable_type: VariableTypeLiteralTypeDef = variable_type
@property
def dependencies(self) -> Set[str]:
"""Stack names that this variable depends on."""
deps: Set[str] = set()
for item in self:
deps.update(item.dependencies)
return deps
@property
def resolved(self) -> bool:
"""Use to check if the variable value has been resolved."""
accumulator: bool = True
for item in self:
accumulator = accumulator and item.resolved
return accumulator
@property
def simplified(self) -> List[VariableValue]:
"""Return a simplified version of the value.
This can be used to concatenate two literals into one literal or
flatten nested concatenations.
"""
return [item.simplified for item in self]
@property
def value(self) -> List[Any]:
"""Value of the variable. Can be resolved or unresolved."""
return [item.value for item in self]
def insert(self, index: int, value: VariableValue) -> None:
"""Insert a value at a specific index."""
self._data.insert(index, value)
def resolve(
self,
context: Union[CfnginContext, RunwayContext],
provider: Optional[Provider] = None,
variables: Optional[RunwayVariablesDefinition] = None,
**kwargs: Any,
) -> None:
"""Resolve the variable value.
Args:
context: The current context object.
provider: Subclass of the base provider.
variables: Object containing variables passed to Runway.
"""
for item in self:
item.resolve(context, provider=provider, variables=variables, **kwargs)
def __delitem__(self, __index: int) -> None:
"""Delete item by index."""
del self._data[__index]
@overload
def __getitem__(self, __index: int) -> VariableValue:
...
@overload
def __getitem__(self, __index: slice) -> List[VariableValue]:
...
def __getitem__( # type: ignore
self, __index: Union[int, slice]
) -> Union[MutableSequence[VariableValue], VariableValue]:
"""Get item by index."""
return self._data[__index] # type: ignore
@overload
def __setitem__(self, __index: int, __value: VariableValue) -> None:
...
@overload
def __setitem__(self, __index: slice, __value: List[VariableValue]) -> None:
...
def __setitem__(
self,
__index: Union[int, slice],
__value: Union[List[VariableValue], VariableValue],
) -> None:
"""Set item by index."""
self._data[__index] = __value # type: ignore
def __iter__(self) -> Iterator[VariableValue]:
"""Object iteration."""
yield from iter(self._data)
def __len__(self) -> int:
"""Length of the object."""
return len(self._data)
def __repr__(self) -> str:
"""Object string representation."""
return f"List[{', '.join(repr(i) for i in self._data)}]"
class VariableValueLiteral(Generic[_LiteralValue], VariableValue):
"""The literal value of a variable as provided."""
def __init__(
self, value: _LiteralValue, variable_type: VariableTypeLiteralTypeDef = "cfngin"
) -> None:
"""Instantiate class.
Args:
value: Data to store in the object.
variable_type: Type of variable (cfngin|runway).
"""
self._data = value
self.variable_type: VariableTypeLiteralTypeDef = variable_type
@property
def resolved(self) -> bool:
"""Use to check if the variable value has been resolved.
The ValueLiteral will always appear as resolved because it does
not "resolve" since it is the literal definition of the value.
"""
return True
@property
def value(self) -> _LiteralValue:
"""Value of the variable."""
return self._data
def __iter__(self) -> Iterator[Any]:
"""How the object is iterated."""
yield self
def __repr__(self) -> str:
"""Return object representation."""
return f"Literal[{self._data}]"
class VariableValueConcatenation(Generic[_VariableValue], VariableValue):
"""A concatenated variable values."""
def __init__(
self,
iterable: Iterable[_VariableValue],
variable_type: VariableTypeLiteralTypeDef = "cfngin",
) -> None:
"""Instantiate class.
Args:
iterable: Data to store in the iterable.
variable_type: Type of variable (cfngin|runway).
"""
self._data = list(iterable)
self.variable_type: VariableTypeLiteralTypeDef = variable_type
@property
def dependencies(self) -> Set[str]:
"""Stack names that this variable depends on."""
deps: Set[str] = set()
for item in self:
deps.update(item.dependencies)
return deps
@property
def resolved(self) -> bool:
"""Use to check if the variable value has been resolved."""
accumulator: bool = True
for item in self:
accumulator = accumulator and item.resolved
return accumulator
@property
def simplified(self) -> VariableValue:
"""Return a simplified version of the value.
This can be used to concatenate two literals into one literal or flatten
nested concatenations.
"""
concat: List[VariableValue] = []
for item in self:
if isinstance(item, VariableValueLiteral) and item.value == "":
pass
elif (
isinstance(item, VariableValueLiteral)
and concat
and isinstance(concat[-1], VariableValueLiteral)
):
concat[-1] = VariableValueLiteral(
str(concat[-1].value) + str(item.value) # type: ignore
)
elif isinstance(item, VariableValueConcatenation): # type: ignore
concat.extend(iter(item.simplified))
else:
concat.append(item.simplified)
if not concat:
return VariableValueLiteral("")
if len(concat) == 1:
return concat[0]
return VariableValueConcatenation(concat)
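    # Editorial example: Concatenation[Literal[foo], Literal[], Literal[bar]]
    # simplifies to Literal[foobar]; a lookup between the literals keeps the
    # result a Concatenation, since lookups cannot be merged until resolved.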
@property
def value(self) -> Any:
"""Value of the variable. Can be resolved or unresolved.
Raises:
InvalidLookupConcatenation
"""
if len(self) == 1:
return self[0].value
values: List[str] = []
for value in self:
resolved_value = value.value
if isinstance(resolved_value, bool) or not isinstance(
resolved_value, (int, str)
):
raise InvalidLookupConcatenation(value, self)
values.append(str(resolved_value))
return "".join(values)
def resolve(
self,
context: Union[CfnginContext, RunwayContext],
provider: Optional[Provider] = None,
variables: Optional[RunwayVariablesDefinition] = None,
**kwargs: Any,
) -> None:
"""Resolve the variable value.
Args:
context: The current context object.
provider: Subclass of the base provider.
variables: Object containing variables passed to Runway.
"""
for value in self:
value.resolve(context, provider=provider, variables=variables, **kwargs)
def __delitem__(self, __index: int) -> None:
"""Delete item by index."""
del self._data[__index]
@overload
def __getitem__(self, __index: int) -> _VariableValue:
...
@overload
def __getitem__(self, __index: slice) -> List[_VariableValue]:
...
def __getitem__(
self, __index: Union[int, slice]
) -> Union[List[_VariableValue], _VariableValue]:
"""Get item by index."""
return self._data[__index]
@overload
def __setitem__(self, __index: int, __value: _VariableValue) -> None:
...
@overload
def __setitem__(self, __index: slice, __value: List[_VariableValue]) -> None:
...
def __setitem__(
self,
__index: Union[int, slice],
__value: Union[List[_VariableValue], _VariableValue],
) -> None:
"""Set item by index."""
self._data[__index] = __value
def __iter__(self) -> Iterator[_VariableValue]:
"""Object iteration."""
yield from iter(self._data)
def __len__(self) -> int:
"""Length of the object."""
return len(self._data)
def __repr__(self) -> str:
"""Return object representation."""
return f"Concatenation[{', '.join(repr(v) for v in self)}]"
class VariableValueLookup(VariableValue):
"""A lookup variable value."""
handler: Type[LookupHandler]
lookup_name: VariableValueLiteral[str]
lookup_query: VariableValue
_resolved: bool
def __init__(
self,
lookup_name: VariableValueLiteral[str],
lookup_query: Union[str, VariableValue],
handler: Optional[Type[LookupHandler]] = None,
variable_type: VariableTypeLiteralTypeDef = "cfngin",
) -> None:
"""Initialize class.
Args:
lookup_name: Name of the invoked lookup.
lookup_query: Data portion of the lookup.
handler: Lookup handler that will be use to resolve the value.
variable_type: Type of variable (cfngin|runway).
Raises:
UnknownLookupType: Invalid lookup type.
ValueError: Invalid value for variable_type.
"""
self._resolved = False
self._data = None
self.lookup_name = lookup_name
self.variable_type: VariableTypeLiteralTypeDef = variable_type
if isinstance(lookup_query, str):
lookup_query = VariableValueLiteral(lookup_query)
self.lookup_query = lookup_query
if handler is None:
lookup_name_resolved = lookup_name.value
try:
if variable_type == "cfngin":
handler = CFNGIN_LOOKUP_HANDLERS[lookup_name_resolved]
elif variable_type == "runway":
handler = RUNWAY_LOOKUP_HANDLERS[lookup_name_resolved]
else:
raise ValueError(
'Variable type must be one of "cfngin" or "runway"'
)
except KeyError:
raise UnknownLookupType(self) from None
self.handler = handler
@property
def dependencies(self) -> Set[str]:
"""Stack names that this variable depends on."""
if hasattr(self.handler, "dependencies"):
return self.handler.dependencies(self.lookup_query)
return set()
@property
def resolved(self) -> bool:
"""Use to check if the variable value has been resolved."""
return self._resolved
@property
def simplified(self) -> VariableValueLookup:
"""Return a simplified version of the value.
This can be used to concatenate two literals into one literal or
flatten nested concatenations.
"""
return self
@property
def value(self) -> Any:
"""Value of the variable. Can be resolved or unresolved.
Raises:
UnresolvedVariableValue: Value accessed before it has been resolved.
"""
if self._resolved:
return self._data
raise UnresolvedVariableValue(self)
def resolve(
self,
context: Union[CfnginContext, RunwayContext],
provider: Optional[Provider] = None,
variables: Optional[RunwayVariablesDefinition] = None,
**kwargs: Any,
) -> None:
"""Resolve the variable value.
Args:
context: The current context object.
provider: Subclass of the base provider.
variables: Object containing variables passed to Runway.
Raises:
FailedLookup: A lookup failed for any reason.
"""
self.lookup_query.resolve(
context=context, provider=provider, variables=variables, **kwargs
)
try:
result = self.handler.handle(
self.lookup_query.value,
context=context,
provider=provider,
variables=variables,
**kwargs,
)
return self._resolve(result)
except Exception as err:
raise FailedLookup(self, err) from err
def __iter__(self) -> Iterator[VariableValueLookup]:
"""How the object is iterated."""
yield self
def __repr__(self) -> str:
"""Return object representation."""
if self._resolved:
return (
f"Lookup[{self._data} ({self.lookup_name} {repr(self.lookup_query)})]"
)
return f"Lookup[{self.lookup_name} {repr(self.lookup_query)}]"
def __str__(self) -> str:
"""Object displayed as a string."""
return f"${{{self.lookup_name.value} {self.lookup_query.value}}}"
class VariableValuePydanticModel(Generic[_PydanticModelTypeVar], VariableValue):
"""A pydantic model variable value."""
def __init__(
self,
data: _PydanticModelTypeVar,
variable_type: VariableTypeLiteralTypeDef = "cfngin",
) -> None:
"""Instantiate class.
Args:
data: Data to be stored in the object.
variable_type: Type of variable (cfngin|runway).
"""
self._data: Dict[str, VariableValue] = {
k: self.parse_obj(v, variable_type=variable_type) for k, v in data
}
self._model_class = type(data)
self.variable_type: VariableTypeLiteralTypeDef = variable_type
@property
def dependencies(self) -> Set[str]:
"""Stack names that this variable depends on."""
deps: Set[str] = set()
for value in self._data.values():
deps.update(value.dependencies)
return deps
@property
def resolved(self) -> bool:
"""Use to check if the variable value has been resolved."""
accumulator: bool = True
for value in self._data.values():
accumulator = accumulator and value.resolved
return accumulator
@property
def simplified(self) -> Dict[str, Any]:
"""Return a simplified version of the value.
This can be used to concatenate two literals into one literal or
flatten nested concatenations.
"""
return {field: value.simplified for field, value in self._data.items()}
@property
def value(self) -> _PydanticModelTypeVar:
"""Value of the variable. Can be resolved or unresolved.
Uses the original pydantic model class to parse the resolved data back
into a pydantic model.
"""
return self._model_class.parse_obj(
{field: value.value for field, value in self._data.items()}
)
def resolve(
self,
context: Union[CfnginContext, RunwayContext],
provider: Optional[Provider] = None,
variables: Optional[RunwayVariablesDefinition] = None,
**kwargs: Any,
) -> None:
"""Resolve the variable value.
Args:
context: The current context object.
provider: Subclass of the base provider.
variables: Object containing variables passed to Runway.
"""
for item in self._data.values():
item.resolve(context, provider=provider, variables=variables, **kwargs)
def __delitem__(self, __key: str) -> None:
"""Delete item by index."""
del self._data[__key]
def __getitem__(self, __key: str) -> VariableValue:
"""Get item by index."""
return self._data[__key]
def __iter__(self) -> Iterator[str]:
"""How the object is iterated."""
yield from iter(self._data)
def __len__(self) -> int:
"""Length of the object."""
return len(self._data)
def __repr__(self) -> str:
"""Return object representation."""
return (
self._model_class.__name__
+ f"[{', '.join(f'{k}={v}' for k, v in self._data.items())}]"
)
def __setitem__(self, __key: str, __value: VariableValue) -> None:
"""Set item by index."""
self._data[__key] = __value
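A minimal sketch of the lookup-free path (editorial addition; assumes the runway package is importable). No context object is needed because literal values are born resolved:

from runway.variables import Variable

var = Variable("tags", {"env": "prod", "count": 3}, variable_type="runway")
assert var.resolved                               # no ${...} lookups present
assert var.value == {"env": "prod", "count": 3}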
|
bdcf9d227d2f439b98f880588de1208251e9e709
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/terraform/checks/resource/alicloud/RDSInstanceLogDisconnections.py
|
5500f0c2882cd2081b55d8d1b192678ef808e9e7
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 287
|
py
|
RDSInstanceLogDisconnections.py
|
from checkov.terraform.checks.resource.alicloud.AbsRDSParameter import AbsRDSParameter
class RDSInstanceLogDisconnections(AbsRDSParameter):
def __init__(self):
super().__init__(check_id="CKV_ALI_36", parameter="log_disconnections")
check = RDSInstanceLogDisconnections()
|
856f8f3a3feb59bbdbce8542cba6377336ee5326
|
afbe5adc51a1aee0467a0bba87e249d8afc77069
|
/shapely/tests/legacy/test_buffer.py
|
4ed8ce5a3af4f77bf14a98a74501272697c84ef4
|
[
"BSD-3-Clause",
"LGPL-2.1-only"
] |
permissive
|
shapely/shapely
|
0df99bfb45d01d35ac77d587b4665797541029c8
|
bc2dd229760fc2df102ab7d3c91834489cf76f1a
|
refs/heads/main
| 2023-09-04T11:20:42.376340
| 2023-08-19T21:28:11
| 2023-08-19T21:28:11
| 3,080,613
| 897
| 130
|
BSD-3-Clause
| 2023-09-12T20:38:36
| 2011-12-31T19:43:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,521
|
py
|
test_buffer.py
|
import unittest
import pytest
from shapely import geometry
from shapely.constructive import BufferCapStyle, BufferJoinStyle
from shapely.geometry.base import CAP_STYLE, JOIN_STYLE
@pytest.mark.parametrize("distance", [float("nan"), float("inf")])
def test_non_finite_distance(distance):
g = geometry.Point(0, 0)
with pytest.raises(ValueError, match="distance must be finite"):
g.buffer(distance)
class BufferTests(unittest.TestCase):
"""Test Buffer Point/Line/Polygon with and without single_sided params"""
def test_empty(self):
g = geometry.Point(0, 0)
h = g.buffer(0)
assert h.is_empty
def test_point(self):
g = geometry.Point(0, 0)
h = g.buffer(1, quad_segs=1)
assert h.geom_type == "Polygon"
expected_coord = [(1.0, 0.0), (0, -1.0), (-1.0, 0), (0, 1.0), (1.0, 0.0)]
for index, coord in enumerate(h.exterior.coords):
assert coord[0] == pytest.approx(expected_coord[index][0])
assert coord[1] == pytest.approx(expected_coord[index][1])
    def test_point_single_sided(self):
g = geometry.Point(0, 0)
h = g.buffer(1, quad_segs=1, single_sided=True)
assert h.geom_type == "Polygon"
expected_coord = [(1.0, 0.0), (0, -1.0), (-1.0, 0), (0, 1.0), (1.0, 0.0)]
for index, coord in enumerate(h.exterior.coords):
assert coord[0] == pytest.approx(expected_coord[index][0])
assert coord[1] == pytest.approx(expected_coord[index][1])
def test_line(self):
g = geometry.LineString([[0, 0], [0, 1]])
h = g.buffer(1, quad_segs=1)
assert h.geom_type == "Polygon"
expected_coord = [
(-1.0, 1.0),
(0, 2.0),
(1.0, 1.0),
(1.0, 0.0),
(0, -1.0),
(-1.0, 0.0),
(-1.0, 1.0),
]
for index, coord in enumerate(h.exterior.coords):
assert coord[0] == pytest.approx(expected_coord[index][0])
assert coord[1] == pytest.approx(expected_coord[index][1])
    def test_line_single_sided_left(self):
g = geometry.LineString([[0, 0], [0, 1]])
h = g.buffer(1, quad_segs=1, single_sided=True)
assert h.geom_type == "Polygon"
expected_coord = [(0.0, 1.0), (0.0, 0.0), (-1.0, 0.0), (-1.0, 1.0), (0.0, 1.0)]
for index, coord in enumerate(h.exterior.coords):
assert coord[0] == pytest.approx(expected_coord[index][0])
assert coord[1] == pytest.approx(expected_coord[index][1])
    def test_line_single_sided_right(self):
g = geometry.LineString([[0, 0], [0, 1]])
h = g.buffer(-1, quad_segs=1, single_sided=True)
assert h.geom_type == "Polygon"
expected_coord = [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]
for index, coord in enumerate(h.exterior.coords):
assert coord[0] == pytest.approx(expected_coord[index][0])
assert coord[1] == pytest.approx(expected_coord[index][1])
def test_polygon(self):
g = geometry.Polygon([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])
h = g.buffer(1, quad_segs=1)
assert h.geom_type == "Polygon"
expected_coord = [
(-1.0, 0.0),
(-1.0, 1.0),
(0.0, 2.0),
(1.0, 2.0),
(2.0, 1.0),
(2.0, 0.0),
(1.0, -1.0),
(0.0, -1.0),
(-1.0, 0.0),
]
for index, coord in enumerate(h.exterior.coords):
assert coord[0] == pytest.approx(expected_coord[index][0])
assert coord[1] == pytest.approx(expected_coord[index][1])
    def test_polygon_single_sided(self):
g = geometry.Polygon([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])
h = g.buffer(1, quad_segs=1, single_sided=True)
assert h.geom_type == "Polygon"
expected_coord = [
(-1.0, 0.0),
(-1.0, 1.0),
(0.0, 2.0),
(1.0, 2.0),
(2.0, 1.0),
(2.0, 0.0),
(1.0, -1.0),
(0.0, -1.0),
(-1.0, 0.0),
]
for index, coord in enumerate(h.exterior.coords):
assert coord[0] == pytest.approx(expected_coord[index][0])
assert coord[1] == pytest.approx(expected_coord[index][1])
def test_enum_values(self):
assert CAP_STYLE.round == 1
assert CAP_STYLE.round == BufferCapStyle.round
assert CAP_STYLE.flat == 2
assert CAP_STYLE.flat == BufferCapStyle.flat
assert CAP_STYLE.square == 3
assert CAP_STYLE.square == BufferCapStyle.square
assert JOIN_STYLE.round == 1
assert JOIN_STYLE.round == BufferJoinStyle.round
assert JOIN_STYLE.mitre == 2
assert JOIN_STYLE.mitre == BufferJoinStyle.mitre
assert JOIN_STYLE.bevel == 3
assert JOIN_STYLE.bevel == BufferJoinStyle.bevel
def test_cap_style(self):
g = geometry.LineString([[0, 0], [1, 0]])
h = g.buffer(1, cap_style=BufferCapStyle.round)
assert h == g.buffer(1, cap_style=CAP_STYLE.round)
assert h == g.buffer(1, cap_style="round")
h = g.buffer(1, cap_style=BufferCapStyle.flat)
assert h == g.buffer(1, cap_style=CAP_STYLE.flat)
assert h == g.buffer(1, cap_style="flat")
h = g.buffer(1, cap_style=BufferCapStyle.square)
assert h == g.buffer(1, cap_style=CAP_STYLE.square)
assert h == g.buffer(1, cap_style="square")
def test_buffer_style(self):
g = geometry.LineString([[0, 0], [1, 0]])
h = g.buffer(1, join_style=BufferJoinStyle.round)
assert h == g.buffer(1, join_style=JOIN_STYLE.round)
assert h == g.buffer(1, join_style="round")
h = g.buffer(1, join_style=BufferJoinStyle.mitre)
assert h == g.buffer(1, join_style=JOIN_STYLE.mitre)
assert h == g.buffer(1, join_style="mitre")
h = g.buffer(1, join_style=BufferJoinStyle.bevel)
assert h == g.buffer(1, join_style=JOIN_STYLE.bevel)
assert h == g.buffer(1, join_style="bevel")
def test_deprecated_quadsegs():
point = geometry.Point(0, 0)
with pytest.warns(FutureWarning):
result = point.buffer(1, quadsegs=1)
expected = point.buffer(1, quad_segs=1)
assert result.equals(expected)
def test_resolution_alias():
point = geometry.Point(0, 0)
result = point.buffer(1, resolution=1)
expected = point.buffer(1, quad_segs=1)
assert result.equals(expected)
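A quick sketch of the single-sided behavior exercised above (assumes shapely >= 2.0 for the quad_segs keyword): positive distances buffer the left side of the line, negative distances the right, and neither adds end caps:

from shapely import geometry

line = geometry.LineString([(0, 0), (0, 1)])
both = line.buffer(1, quad_segs=1)                       # both sides, with caps
left = line.buffer(1, quad_segs=1, single_sided=True)    # left side only
right = line.buffer(-1, quad_segs=1, single_sided=True)  # right side only
assert left.area + right.area < both.area                # the caps account for the difference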
|
3208f6eafd24547c1056c6108640fea24e9581f2
|
7d901e1a364d1fd7389bf9c1c46d08493a99b3d7
|
/overlayer/textractoverlayer/_version.py
|
34fc6b28b0d6ae8bd60135f791473daf2ae0ef1c
|
[
"Apache-2.0"
] |
permissive
|
aws-samples/amazon-textract-textractor
|
9531e75783a37767a8c791c65149a1a2e5a55b82
|
e5051e53c062f8af60ec5fa9445affb0c7485f7b
|
refs/heads/master
| 2023-08-05T05:14:52.216417
| 2023-07-24T11:07:29
| 2023-07-24T11:07:29
| 185,276,103
| 286
| 117
|
Apache-2.0
| 2023-09-07T20:52:04
| 2019-05-06T21:43:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 24
|
py
|
_version.py
|
__version__ = '0.0.11'
|
2a8f4c1665ad321c19bb938db9ef02838d019690
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/identity/azure-identity/azure/identity/__init__.py
|
8030b55ee033a7ce962c81d53daa7858955961f2
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,833
|
py
|
__init__.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Credentials for Azure SDK clients."""
from ._auth_record import AuthenticationRecord
from ._exceptions import AuthenticationRequiredError, CredentialUnavailableError
from ._constants import AzureAuthorityHosts, KnownAuthorities
from ._credentials import (
AuthorizationCodeCredential,
AzureDeveloperCliCredential,
AzureCliCredential,
AzurePowerShellCredential,
CertificateCredential,
ChainedTokenCredential,
ClientAssertionCredential,
ClientSecretCredential,
DefaultAzureCredential,
DeviceCodeCredential,
EnvironmentCredential,
InteractiveBrowserCredential,
ManagedIdentityCredential,
OnBehalfOfCredential,
SharedTokenCacheCredential,
UsernamePasswordCredential,
VisualStudioCodeCredential,
WorkloadIdentityCredential,
)
from ._persistent_cache import TokenCachePersistenceOptions
__all__ = [
"AuthenticationRecord",
"AuthenticationRequiredError",
"AuthorizationCodeCredential",
"AzureAuthorityHosts",
"AzureCliCredential",
"AzureDeveloperCliCredential",
"AzurePowerShellCredential",
"CertificateCredential",
"ChainedTokenCredential",
"ClientAssertionCredential",
"ClientSecretCredential",
"CredentialUnavailableError",
"DefaultAzureCredential",
"DeviceCodeCredential",
"EnvironmentCredential",
"InteractiveBrowserCredential",
"KnownAuthorities",
"OnBehalfOfCredential",
"ManagedIdentityCredential",
"SharedTokenCacheCredential",
"TokenCachePersistenceOptions",
"UsernamePasswordCredential",
"VisualStudioCodeCredential",
"WorkloadIdentityCredential",
]
from ._version import VERSION
__version__ = VERSION
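A hedged usage sketch (editorial addition): DefaultAzureCredential tries the environment, workload/managed identity, shared cache, and developer tools in order; the scope below is the standard ARM example:

from azure.identity import DefaultAzureCredential

credential = DefaultAzureCredential()
token = credential.get_token("https://management.azure.com/.default")
print(token.expires_on)  # POSIX timestamp at which the token expires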
|
e2b10366f28427c3f14551641b5a60745fc225d3
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/twisted/trial/test/test_assertions.py
|
e3f0fcd2638ce36fc2542c5bc06093dd14788930
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 46,630
|
py
|
test_assertions.py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for assertions provided by C{SynchronousTestCase} and C{TestCase},
provided by L{twisted.trial.unittest}.
L{TestFailureTests} demonstrates that L{SynchronousTestCase.fail} works, so that
is the only method on C{twisted.trial.unittest.SynchronousTestCase} that is
initially assumed to work. The test classes are arranged so that the methods
demonstrated to work earlier in the file are used by those later in the file
(even though the runner will probably not run the tests in this order).
"""
from __future__ import division, absolute_import
import warnings
import unittest as pyunit
from twisted.python.util import FancyEqMixin
from twisted.python.reflect import (
prefixedMethods, accumulateMethods, fullyQualifiedName)
from twisted.python.deprecate import deprecated
from incremental import Version, getVersionString
from twisted.python.failure import Failure
from twisted.trial import unittest
from twisted.internet.defer import Deferred, fail, succeed
class MockEquality(FancyEqMixin, object):
compareAttributes = ("name",)
def __init__(self, name):
self.name = name
def __repr__(self):
return "MockEquality(%s)" % (self.name,)
class ComparisonError(object):
"""
An object which raises exceptions from its comparison methods.
"""
def _error(self, other):
raise ValueError("Comparison is broken")
__eq__ = __ne__ = _error
class TestFailureTests(pyunit.TestCase):
"""
Tests for the most basic functionality of L{SynchronousTestCase}, for
failing tests.
This class contains tests to demonstrate that L{SynchronousTestCase.fail}
can be used to fail a test, and that that failure is reflected in the test
result object. This should be sufficient functionality so that further
tests can be built on L{SynchronousTestCase} instead of
L{unittest.TestCase}. This depends on L{unittest.TestCase} working.
"""
class FailingTest(unittest.SynchronousTestCase):
def test_fails(self):
self.fail("This test fails.")
def setUp(self):
"""
Load a suite of one test which can be used to exercise the failure
handling behavior.
"""
components = [
__name__, self.__class__.__name__, self.FailingTest.__name__]
self.loader = pyunit.TestLoader()
self.suite = self.loader.loadTestsFromName(".".join(components))
self.test = list(self.suite)[0]
def test_fail(self):
"""
L{SynchronousTestCase.fail} raises
L{SynchronousTestCase.failureException} with the given argument.
"""
try:
self.test.fail("failed")
except self.test.failureException as result:
self.assertEqual("failed", str(result))
else:
self.fail(
"SynchronousTestCase.fail method did not raise "
"SynchronousTestCase.failureException")
def test_failingExceptionFails(self):
"""
When a test method raises L{SynchronousTestCase.failureException}, the test is
marked as having failed on the L{TestResult}.
"""
result = pyunit.TestResult()
self.suite.run(result)
self.assertFalse(result.wasSuccessful())
self.assertEqual(result.errors, [])
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.failures[0][0], self.test)
class AssertFalseTests(unittest.SynchronousTestCase):
"""
Tests for L{SynchronousTestCase}'s C{assertFalse} and C{failIf} assertion
methods.
This is pretty paranoid. Still, a certain paranoia is healthy if you
are testing a unit testing framework.
@note: As of 11.2, C{assertFalse} is preferred over C{failIf}.
"""
def _assertFalseFalse(self, method):
"""
Perform the positive case test for C{failIf} or C{assertFalse}.
@param method: The test method to test.
"""
for notTrue in [0, 0.0, False, None, (), []]:
result = method(notTrue, "failed on %r" % (notTrue,))
if result != notTrue:
self.fail("Did not return argument %r" % (notTrue,))
def _assertFalseTrue(self, method):
"""
Perform the negative case test for C{failIf} or C{assertFalse}.
@param method: The test method to test.
"""
for true in [1, True, 'cat', [1,2], (3,4)]:
try:
method(true, "failed on %r" % (true,))
except self.failureException as e:
self.assertIn(
"failed on %r" % (true,), str(e),
"Raised incorrect exception on %r: %r" % (true, e)
)
else:
self.fail(
"Call to %s(%r) didn't fail" % (method.__name__, true,)
)
def test_failIfFalse(self):
"""
L{SynchronousTestCase.failIf} returns its argument if its argument is
not considered true.
"""
self._assertFalseFalse(self.failIf)
def test_assertFalseFalse(self):
"""
L{SynchronousTestCase.assertFalse} returns its argument if its argument
is not considered true.
"""
self._assertFalseFalse(self.assertFalse)
def test_failIfTrue(self):
"""
L{SynchronousTestCase.failIf} raises
L{SynchronousTestCase.failureException} if its argument is considered
true.
"""
self._assertFalseTrue(self.failIf)
def test_assertFalseTrue(self):
"""
L{SynchronousTestCase.assertFalse} raises
L{SynchronousTestCase.failureException} if its argument is considered
true.
"""
self._assertFalseTrue(self.assertFalse)
class AssertTrueTests(unittest.SynchronousTestCase):
"""
Tests for L{SynchronousTestCase}'s C{assertTrue} and C{failUnless} assertion
methods.
This is pretty paranoid. Still, a certain paranoia is healthy if you
are testing a unit testing framework.
@note: As of 11.2, C{assertTrue} is preferred over C{failUnless}.
"""
def _assertTrueFalse(self, method):
"""
Perform the negative case test for C{assertTrue} and C{failUnless}.
@param method: The test method to test.
"""
for notTrue in [0, 0.0, False, None, (), []]:
try:
method(notTrue, "failed on %r" % (notTrue,))
except self.failureException as e:
self.assertIn(
"failed on %r" % (notTrue,), str(e),
"Raised incorrect exception on %r: %r" % (notTrue, e)
)
else:
self.fail(
"Call to %s(%r) didn't fail" % (method.__name__, notTrue,)
)
def _assertTrueTrue(self, method):
"""
Perform the positive case test for C{assertTrue} and C{failUnless}.
@param method: The test method to test.
"""
for true in [1, True, 'cat', [1,2], (3,4)]:
result = method(true, "failed on %r" % (true,))
if result != true:
self.fail("Did not return argument %r" % (true,))
def test_assertTrueFalse(self):
"""
L{SynchronousTestCase.assertTrue} raises
L{SynchronousTestCase.failureException} if its argument is not
considered true.
"""
self._assertTrueFalse(self.assertTrue)
def test_failUnlessFalse(self):
"""
L{SynchronousTestCase.failUnless} raises
L{SynchronousTestCase.failureException} if its argument is not
considered true.
"""
self._assertTrueFalse(self.failUnless)
def test_assertTrueTrue(self):
"""
L{SynchronousTestCase.assertTrue} returns its argument if its argument
is considered true.
"""
self._assertTrueTrue(self.assertTrue)
def test_failUnlessTrue(self):
"""
L{SynchronousTestCase.failUnless} returns its argument if its argument
is considered true.
"""
self._assertTrueTrue(self.failUnless)
class SynchronousAssertionsTests(unittest.SynchronousTestCase):
"""
Tests for L{SynchronousTestCase}'s assertion methods. That is, failUnless*,
failIf*, assert* (not covered by other more specific test classes).
Note: As of 11.2, assertEqual is preferred over the failUnlessEqual(s)
variants. Tests have been modified to reflect this preference.
This is pretty paranoid. Still, a certain paranoia is healthy if you are
testing a unit testing framework.
"""
def _testEqualPair(self, first, second):
x = self.assertEqual(first, second)
if x != first:
self.fail("assertEqual should return first parameter")
def _testUnequalPair(self, first, second):
"""
Assert that when called with unequal arguments, C{assertEqual} raises a
failure exception with the same message as the standard library
C{assertEqual} would have raised.
"""
raised = False
try:
self.assertEqual(first, second)
except self.failureException as ourFailure:
case = pyunit.TestCase("setUp")
try:
case.assertEqual(first, second)
except case.failureException as theirFailure:
raised = True
got = str(ourFailure)
expected = str(theirFailure)
if expected != got:
self.fail("Expected: %r; Got: %r" % (expected, got))
if not raised:
self.fail(
"Call to assertEqual(%r, %r) didn't fail" % (first, second)
)
def test_assertEqual_basic(self):
self._testEqualPair('cat', 'cat')
self._testUnequalPair('cat', 'dog')
self._testEqualPair([1], [1])
self._testUnequalPair([1], 'orange')
def test_assertEqual_custom(self):
x = MockEquality('first')
y = MockEquality('second')
z = MockEquality('first')
self._testEqualPair(x, x)
self._testEqualPair(x, z)
self._testUnequalPair(x, y)
self._testUnequalPair(y, z)
def test_assertEqualMessage(self):
"""
When a message is passed to L{assertEqual} it is included in the error
message.
"""
message = 'message'
exception = self.assertRaises(
self.failureException, self.assertEqual,
'foo', 'bar', message
)
self.assertIn(message, str(exception))
def test_assertEqualNoneMessage(self):
"""
If a message is specified as L{None}, it is not included in the error
message of L{assertEqual}.
"""
exceptionForNone = self.assertRaises(
self.failureException, self.assertEqual, 'foo', 'bar', None
)
exceptionWithout = self.assertRaises(
self.failureException, self.assertEqual, 'foo', 'bar'
)
self.assertEqual(str(exceptionWithout), str(exceptionForNone))
def test_assertEqual_incomparable(self):
apple = ComparisonError()
orange = ["orange"]
try:
self.assertEqual(apple, orange)
except self.failureException:
self.fail("Fail raised when ValueError ought to have been raised.")
except ValueError:
# good. error not swallowed
pass
else:
self.fail("Comparing %r and %r should have raised an exception"
% (apple, orange))
def _raiseError(self, error):
raise error
def test_failUnlessRaises_expected(self):
x = self.failUnlessRaises(ValueError, self._raiseError, ValueError)
self.assertTrue(isinstance(x, ValueError),
"Expect failUnlessRaises to return instance of raised "
"exception.")
def test_failUnlessRaises_unexpected(self):
try:
self.failUnlessRaises(ValueError, self._raiseError, TypeError)
except TypeError:
self.fail("failUnlessRaises shouldn't re-raise unexpected "
"exceptions")
except self.failureException:
# what we expect
pass
else:
self.fail("Expected exception wasn't raised. Should have failed")
def test_failUnlessRaises_noException(self):
returnValue = 3
try:
            self.failUnlessRaises(ValueError, lambda: returnValue)
except self.failureException as e:
self.assertEqual(str(e),
'ValueError not raised (3 returned)')
else:
self.fail("Exception not raised. Should have failed")
def test_failUnlessRaises_failureException(self):
x = self.failUnlessRaises(self.failureException, self._raiseError,
self.failureException)
self.assertTrue(isinstance(x, self.failureException),
"Expected %r instance to be returned"
% (self.failureException,))
try:
x = self.failUnlessRaises(self.failureException, self._raiseError,
ValueError)
except self.failureException:
# what we expect
pass
else:
self.fail("Should have raised exception")
def test_assertRaisesContextExpected(self):
"""
If C{assertRaises} is used to create a context manager and an exception
is raised from the body of the C{with} statement then the context
manager's C{exception} attribute is set to the exception that was
raised.
"""
exception = ValueError('marker')
with self.assertRaises(ValueError) as context:
raise exception
self.assertIs(exception, context.exception)
def test_assertRaisesContextUnexpected(self):
"""
If C{assertRaises} is used to create a context manager and the wrong
exception type is raised from the body of the C{with} statement then
the C{with} statement raises C{failureException} describing the
mismatch.
"""
try:
with self.assertRaises(ValueError):
raise TypeError('marker')
except self.failureException as exception:
message = str(exception)
expected = (
"{type} raised instead of ValueError:\n"
" Traceback").format(type=fullyQualifiedName(TypeError))
self.assertTrue(
message.startswith(expected),
"Exception message did not begin with expected information: "
"{0}".format(message))
else:
self.fail(
"Mismatched exception type should have caused test failure.")
def test_assertRaisesContextNoException(self):
"""
If C{assertRaises} is used to create a context manager and no exception
is raised from the body of the C{with} statement then the C{with}
statement raises C{failureException} describing the lack of exception.
"""
try:
with self.assertRaises(ValueError):
# No exception is raised.
pass
except self.failureException as exception:
message = str(exception)
            # The "(None returned)" text is kept for backward compatibility and
            # can be ignored in the context-manager use case.
self.assertEqual(message, "ValueError not raised (None returned)")
else:
self.fail("Non-exception result should have caused test failure.")
def test_brokenName(self):
"""
If the exception type passed to C{assertRaises} does not have a
C{__name__} then the context manager still manages to construct a
descriptive string for it.
"""
try:
with self.assertRaises((ValueError, TypeError)):
# Just some other kind of exception
raise AttributeError()
except self.failureException as exception:
message = str(exception)
valueError = "ValueError" not in message
typeError = "TypeError" not in message
errors = []
if valueError:
errors.append("expected ValueError in exception message")
if typeError:
errors.append("expected TypeError in exception message")
if errors:
self.fail("; ".join(errors), "message = {0}".format(message))
else:
self.fail(
"Mismatched exception type should have caused test failure.")
def test_failIfEqual_basic(self):
x, y, z = [1], [2], [1]
ret = self.failIfEqual(x, y)
self.assertEqual(ret, x,
"failIfEqual should return first parameter")
self.failUnlessRaises(self.failureException,
self.failIfEqual, x, x)
self.failUnlessRaises(self.failureException,
self.failIfEqual, x, z)
def test_failIfEqual_customEq(self):
x = MockEquality('first')
y = MockEquality('second')
z = MockEquality('fecund')
ret = self.failIfEqual(x, y)
self.assertEqual(ret, x,
"failIfEqual should return first parameter")
self.failUnlessRaises(self.failureException,
self.failIfEqual, x, x)
self.failIfEqual(x, z, "__ne__ should make these not equal")
def test_failIfIdenticalPositive(self):
"""
C{failIfIdentical} returns its first argument if its first and second
arguments are not the same object.
"""
x = object()
y = object()
result = self.failIfIdentical(x, y)
self.assertEqual(x, result)
def test_failIfIdenticalNegative(self):
"""
C{failIfIdentical} raises C{failureException} if its first and second
arguments are the same object.
"""
x = object()
self.failUnlessRaises(self.failureException,
self.failIfIdentical, x, x)
def test_failUnlessIdentical(self):
x, y, z = [1], [1], [2]
ret = self.failUnlessIdentical(x, x)
self.assertEqual(ret, x,
'failUnlessIdentical should return first '
'parameter')
self.failUnlessRaises(self.failureException,
self.failUnlessIdentical, x, y)
self.failUnlessRaises(self.failureException,
self.failUnlessIdentical, x, z)
def test_failUnlessApproximates(self):
x, y, z = 1.0, 1.1, 1.2
self.failUnlessApproximates(x, x, 0.2)
ret = self.failUnlessApproximates(x, y, 0.2)
self.assertEqual(ret, x, "failUnlessApproximates should return "
"first parameter")
self.failUnlessRaises(self.failureException,
self.failUnlessApproximates, x, z, 0.1)
self.failUnlessRaises(self.failureException,
self.failUnlessApproximates, x, y, 0.1)
def test_failUnlessAlmostEqual(self):
precision = 5
x = 8.000001
y = 8.00001
z = 8.000002
self.failUnlessAlmostEqual(x, x, precision)
ret = self.failUnlessAlmostEqual(x, z, precision)
self.assertEqual(ret, x, "failUnlessAlmostEqual should return "
"first parameter (%r, %r)" % (ret, x))
self.failUnlessRaises(self.failureException,
self.failUnlessAlmostEqual, x, y, precision)
def test_failIfAlmostEqual(self):
precision = 5
x = 8.000001
y = 8.00001
z = 8.000002
ret = self.failIfAlmostEqual(x, y, precision)
self.assertEqual(ret, x, "failIfAlmostEqual should return "
"first parameter (%r, %r)" % (ret, x))
self.failUnlessRaises(self.failureException,
self.failIfAlmostEqual, x, x, precision)
self.failUnlessRaises(self.failureException,
self.failIfAlmostEqual, x, z, precision)
def test_failUnlessSubstring(self):
x = "cat"
y = "the dog sat"
z = "the cat sat"
self.failUnlessSubstring(x, x)
ret = self.failUnlessSubstring(x, z)
self.assertEqual(ret, x, 'should return first parameter')
self.failUnlessRaises(self.failureException,
self.failUnlessSubstring, x, y)
self.failUnlessRaises(self.failureException,
self.failUnlessSubstring, z, x)
def test_failIfSubstring(self):
x = "cat"
y = "the dog sat"
z = "the cat sat"
self.failIfSubstring(z, x)
ret = self.failIfSubstring(x, y)
self.assertEqual(ret, x, 'should return first parameter')
self.failUnlessRaises(self.failureException,
self.failIfSubstring, x, x)
self.failUnlessRaises(self.failureException,
self.failIfSubstring, x, z)
def test_assertIs(self):
"""
L{assertIs} passes if two objects are identical.
"""
a = MockEquality("first")
self.assertIs(a, a)
def test_assertIsError(self):
"""
L{assertIs} fails if two objects are not identical.
"""
a, b = MockEquality("first"), MockEquality("first")
self.assertEqual(a, b)
self.assertRaises(self.failureException, self.assertIs, a, b)
def test_assertIsNot(self):
"""
L{assertIsNot} passes if two objects are not identical.
"""
a, b = MockEquality("first"), MockEquality("first")
self.assertEqual(a, b)
self.assertIsNot(a, b)
def test_assertIsNotError(self):
"""
L{assertIsNot} fails if two objects are identical.
"""
a = MockEquality("first")
self.assertRaises(self.failureException, self.assertIsNot, a, a)
def test_assertIsInstance(self):
"""
Test a true condition of assertIsInstance.
"""
A = type('A', (object,), {})
a = A()
self.assertIsInstance(a, A)
def test_assertIsInstanceMultipleClasses(self):
"""
Test a true condition of assertIsInstance with multiple classes.
"""
A = type('A', (object,), {})
B = type('B', (object,), {})
a = A()
self.assertIsInstance(a, (A, B))
def test_assertIsInstanceError(self):
"""
Test an error with assertIsInstance.
"""
A = type('A', (object,), {})
B = type('B', (object,), {})
a = A()
self.assertRaises(self.failureException, self.assertIsInstance, a, B)
def test_assertIsInstanceErrorMultipleClasses(self):
"""
Test an error with assertIsInstance and multiple classes.
"""
A = type('A', (object,), {})
B = type('B', (object,), {})
C = type('C', (object,), {})
a = A()
self.assertRaises(self.failureException, self.assertIsInstance, a, (B, C))
def test_assertIsInstanceCustomMessage(self):
"""
If L{TestCase.assertIsInstance} is passed a custom message as its 3rd
argument, the message is included in the failure exception raised when
the assertion fails.
"""
exc = self.assertRaises(
self.failureException,
self.assertIsInstance, 3, str, "Silly assertion")
self.assertIn("Silly assertion", str(exc))
def test_assertNotIsInstance(self):
"""
Test a true condition of assertNotIsInstance.
"""
A = type('A', (object,), {})
B = type('B', (object,), {})
a = A()
self.assertNotIsInstance(a, B)
def test_assertNotIsInstanceMultipleClasses(self):
"""
Test a true condition of assertNotIsInstance and multiple classes.
"""
A = type('A', (object,), {})
B = type('B', (object,), {})
C = type('C', (object,), {})
a = A()
self.assertNotIsInstance(a, (B, C))
def test_assertNotIsInstanceError(self):
"""
Test an error with assertNotIsInstance.
"""
A = type('A', (object,), {})
a = A()
error = self.assertRaises(self.failureException,
self.assertNotIsInstance, a, A)
self.assertEqual(str(error), "%r is an instance of %s" % (a, A))
def test_assertNotIsInstanceErrorMultipleClasses(self):
"""
Test an error with assertNotIsInstance and multiple classes.
"""
A = type('A', (object,), {})
B = type('B', (object,), {})
a = A()
self.assertRaises(self.failureException, self.assertNotIsInstance, a, (A, B))
def test_assertDictEqual(self):
"""
L{twisted.trial.unittest.TestCase} supports the C{assertDictEqual}
method inherited from the standard library in Python 2.7.
"""
self.assertDictEqual({'a': 1}, {'a': 1})
if getattr(unittest.SynchronousTestCase, 'assertDictEqual', None) is None:
test_assertDictEqual.skip = (
"assertDictEqual is not available on this version of Python")
class WarningAssertionTests(unittest.SynchronousTestCase):
def test_assertWarns(self):
"""
Test basic assertWarns report.
"""
def deprecated(a):
warnings.warn("Woo deprecated", category=DeprecationWarning)
return a
r = self.assertWarns(DeprecationWarning, "Woo deprecated", __file__,
deprecated, 123)
self.assertEqual(r, 123)
def test_assertWarnsRegistryClean(self):
"""
Test that assertWarns cleans the warning registry, so the warning is
not swallowed the second time.
"""
def deprecated(a):
warnings.warn("Woo deprecated", category=DeprecationWarning)
return a
r1 = self.assertWarns(DeprecationWarning, "Woo deprecated", __file__,
deprecated, 123)
self.assertEqual(r1, 123)
# The warning should be raised again
r2 = self.assertWarns(DeprecationWarning, "Woo deprecated", __file__,
deprecated, 321)
self.assertEqual(r2, 321)
def test_assertWarnsError(self):
"""
Test assertWarns failure when no warning is generated.
"""
def normal(a):
return a
self.assertRaises(self.failureException,
self.assertWarns, DeprecationWarning, "Woo deprecated", __file__,
normal, 123)
def test_assertWarnsWrongCategory(self):
"""
Test assertWarns failure when the category is wrong.
"""
def deprecated(a):
warnings.warn("Foo deprecated", category=DeprecationWarning)
return a
self.assertRaises(self.failureException,
self.assertWarns, UserWarning, "Foo deprecated", __file__,
deprecated, 123)
def test_assertWarnsWrongMessage(self):
"""
Test assertWarns failure when the message is wrong.
"""
def deprecated(a):
warnings.warn("Foo deprecated", category=DeprecationWarning)
return a
self.assertRaises(self.failureException,
self.assertWarns, DeprecationWarning, "Bar deprecated", __file__,
deprecated, 123)
def test_assertWarnsWrongFile(self):
"""
If the warning emitted by a function refers to a different file than is
passed to C{assertWarns}, C{failureException} is raised.
"""
def deprecated(a):
# stacklevel=2 points at the direct caller of the function. The
# way assertRaises is invoked below, the direct caller will be
# something somewhere in trial, not something in this file. In
# Python 2.5 and earlier, stacklevel of 0 resulted in a warning
# pointing to the warnings module itself. Starting in Python 2.6,
# stacklevel of 0 and 1 both result in a warning pointing to *this*
# file, presumably due to the fact that the warn function is
# implemented in C and has no convenient Python
# filename/linenumber.
warnings.warn(
"Foo deprecated", category=DeprecationWarning, stacklevel=2)
self.assertRaises(
self.failureException,
# Since the direct caller isn't in this file, try to assert that
# the warning *does* point to this file, so that assertWarns raises
# an exception.
self.assertWarns, DeprecationWarning, "Foo deprecated", __file__,
deprecated, 123)
def test_assertWarnsOnClass(self):
"""
Test assertWarns works when creating a class instance.
"""
class Warn:
def __init__(self):
warnings.warn("Do not call me", category=RuntimeWarning)
r = self.assertWarns(RuntimeWarning, "Do not call me", __file__,
Warn)
self.assertTrue(isinstance(r, Warn))
r = self.assertWarns(RuntimeWarning, "Do not call me", __file__,
Warn)
self.assertTrue(isinstance(r, Warn))
def test_assertWarnsOnMethod(self):
"""
Test assertWarns works when used on an instance method.
"""
class Warn:
def deprecated(self, a):
warnings.warn("Bar deprecated", category=DeprecationWarning)
return a
w = Warn()
r = self.assertWarns(DeprecationWarning, "Bar deprecated", __file__,
w.deprecated, 321)
self.assertEqual(r, 321)
r = self.assertWarns(DeprecationWarning, "Bar deprecated", __file__,
w.deprecated, 321)
self.assertEqual(r, 321)
def test_assertWarnsOnCall(self):
"""
Test assertWarns works on instance with C{__call__} method.
"""
class Warn:
def __call__(self, a):
warnings.warn("Egg deprecated", category=DeprecationWarning)
return a
w = Warn()
r = self.assertWarns(DeprecationWarning, "Egg deprecated", __file__,
w, 321)
self.assertEqual(r, 321)
r = self.assertWarns(DeprecationWarning, "Egg deprecated", __file__,
w, 321)
self.assertEqual(r, 321)
def test_assertWarnsFilter(self):
"""
Test assertWarns on a warning filtered by default.
"""
def deprecated(a):
warnings.warn("Woo deprecated", category=PendingDeprecationWarning)
return a
r = self.assertWarns(PendingDeprecationWarning, "Woo deprecated",
__file__, deprecated, 123)
self.assertEqual(r, 123)
def test_assertWarnsMultipleWarnings(self):
"""
C{assertWarns} does not raise an exception if the function it is passed
triggers the same warning more than once.
"""
def deprecated():
warnings.warn("Woo deprecated", category=PendingDeprecationWarning)
def f():
deprecated()
deprecated()
self.assertWarns(
PendingDeprecationWarning, "Woo deprecated", __file__, f)
def test_assertWarnsDifferentWarnings(self):
"""
For now, assertWarns is unable to handle multiple different warnings,
so it should raise an exception if it's the case.
"""
def deprecated(a):
warnings.warn("Woo deprecated", category=DeprecationWarning)
warnings.warn("Another one", category=PendingDeprecationWarning)
e = self.assertRaises(self.failureException,
self.assertWarns, DeprecationWarning, "Woo deprecated",
__file__, deprecated, 123)
self.assertEqual(str(e), "Can't handle different warnings")
def test_assertWarnsAfterUnassertedWarning(self):
"""
Warnings emitted before L{TestCase.assertWarns} is called do not get
flushed and do not alter the behavior of L{TestCase.assertWarns}.
"""
class TheWarning(Warning):
pass
def f(message):
warnings.warn(message, category=TheWarning)
f("foo")
self.assertWarns(TheWarning, "bar", __file__, f, "bar")
[warning] = self.flushWarnings([f])
self.assertEqual(warning['message'], "foo")
class ResultOfAssertionsTests(unittest.SynchronousTestCase):
"""
Tests for L{SynchronousTestCase.successResultOf},
L{SynchronousTestCase.failureResultOf}, and
L{SynchronousTestCase.assertNoResult}.
"""
result = object()
failure = Failure(Exception("Bad times"))
def test_withoutSuccessResult(self):
"""
L{SynchronousTestCase.successResultOf} raises
L{SynchronousTestCase.failureException} when called with a L{Deferred}
with no current result.
"""
self.assertRaises(
self.failureException, self.successResultOf, Deferred())
def test_successResultOfWithFailure(self):
"""
L{SynchronousTestCase.successResultOf} raises
L{SynchronousTestCase.failureException} when called with a L{Deferred}
with a failure result.
"""
self.assertRaises(
self.failureException, self.successResultOf, fail(self.failure))
def test_successResultOfWithFailureHasTraceback(self):
"""
L{SynchronousTestCase.successResultOf} raises a
L{SynchronousTestCase.failureException} that has the original failure
traceback when called with a L{Deferred} with a failure result.
"""
try:
self.successResultOf(fail(self.failure))
except self.failureException as e:
self.assertIn(self.failure.getTraceback(), str(e))
def test_withoutFailureResult(self):
"""
L{SynchronousTestCase.failureResultOf} raises
L{SynchronousTestCase.failureException} when called with a L{Deferred}
with no current result.
"""
self.assertRaises(
self.failureException, self.failureResultOf, Deferred())
def test_failureResultOfWithSuccess(self):
"""
L{SynchronousTestCase.failureResultOf} raises
L{SynchronousTestCase.failureException} when called with a L{Deferred}
with a success result.
"""
self.assertRaises(
self.failureException, self.failureResultOf, succeed(self.result))
def test_failureResultOfWithWrongFailure(self):
"""
L{SynchronousTestCase.failureResultOf} raises
L{SynchronousTestCase.failureException} when called with a L{Deferred}
with a failure type that was not expected.
"""
self.assertRaises(
self.failureException, self.failureResultOf, fail(self.failure),
KeyError)
def test_failureResultOfWithWrongFailureOneExpectedFailure(self):
"""
L{SynchronousTestCase.failureResultOf} raises
L{SynchronousTestCase.failureException} when called with a L{Deferred}
with a failure type that was not expected, and the
L{SynchronousTestCase.failureException} message contains the original
failure traceback as well as the expected failure type
"""
try:
self.failureResultOf(fail(self.failure), KeyError)
except self.failureException as e:
self.assertIn(self.failure.getTraceback(), str(e))
self.assertIn(
"Failure of type ({0}.{1}) expected on".format(
KeyError.__module__, KeyError.__name__),
str(e))
def test_failureResultOfWithWrongFailureMultiExpectedFailure(self):
"""
L{SynchronousTestCase.failureResultOf} raises
L{SynchronousTestCase.failureException} when called with a L{Deferred}
with a failure type that was not expected, and the
L{SynchronousTestCase.failureException} message contains the original
failure traceback as well as the expected failure types in the error
message
"""
try:
self.failureResultOf(fail(self.failure), KeyError, IOError)
except self.failureException as e:
self.assertIn(self.failure.getTraceback(), str(e))
self.assertIn(
"Failure of type ({0}.{1} or {2}.{3}) expected on".format(
KeyError.__module__, KeyError.__name__,
IOError.__module__, IOError.__name__),
str(e))
def test_withSuccessResult(self):
"""
When passed a L{Deferred} which currently has a result (ie,
L{Deferred.addCallback} would cause the added callback to be called
before C{addCallback} returns), L{SynchronousTestCase.successResultOf}
returns that result.
"""
self.assertIdentical(
self.result, self.successResultOf(succeed(self.result)))
def test_withExpectedFailureResult(self):
"""
When passed a L{Deferred} which currently has a L{Failure} result (ie,
L{Deferred.addErrback} would cause the added errback to be called
before C{addErrback} returns), L{SynchronousTestCase.failureResultOf}
returns that L{Failure} if that L{Failure}'s type is expected.
"""
self.assertIdentical(
self.failure,
self.failureResultOf(fail(self.failure), self.failure.type,
KeyError))
def test_withFailureResult(self):
"""
When passed a L{Deferred} which currently has a L{Failure} result
(ie, L{Deferred.addErrback} would cause the added errback to be called
before C{addErrback} returns), L{SynchronousTestCase.failureResultOf}
returns that L{Failure}.
"""
self.assertIdentical(
self.failure, self.failureResultOf(fail(self.failure)))
def test_assertNoResultSuccess(self):
"""
When passed a L{Deferred} which currently has a success result (see
L{test_withSuccessResult}), L{SynchronousTestCase.assertNoResult} raises
L{SynchronousTestCase.failureException}.
"""
self.assertRaises(
self.failureException, self.assertNoResult, succeed(self.result))
def test_assertNoResultFailure(self):
"""
When passed a L{Deferred} which currently has a failure result (see
L{test_withFailureResult}), L{SynchronousTestCase.assertNoResult} raises
L{SynchronousTestCase.failureException}.
"""
self.assertRaises(
self.failureException, self.assertNoResult, fail(self.failure))
def test_assertNoResult(self):
"""
        When passed a L{Deferred} with no current result,
        L{SynchronousTestCase.assertNoResult} does not raise an exception.
        """
self.assertNoResult(Deferred())
def test_assertNoResultPropagatesSuccess(self):
"""
When passed a L{Deferred} with no current result, which is then
fired with a success result, L{SynchronousTestCase.assertNoResult}
doesn't modify the result of the L{Deferred}.
"""
d = Deferred()
self.assertNoResult(d)
d.callback(self.result)
self.assertEqual(self.result, self.successResultOf(d))
def test_assertNoResultPropagatesLaterFailure(self):
"""
When passed a L{Deferred} with no current result, which is then
fired with a L{Failure} result, L{SynchronousTestCase.assertNoResult}
doesn't modify the result of the L{Deferred}.
"""
d = Deferred()
self.assertNoResult(d)
d.errback(self.failure)
self.assertEqual(self.failure, self.failureResultOf(d))
def test_assertNoResultSwallowsImmediateFailure(self):
"""
When passed a L{Deferred} which currently has a L{Failure} result,
L{SynchronousTestCase.assertNoResult} changes the result of the
L{Deferred} to a success.
"""
d = fail(self.failure)
try:
self.assertNoResult(d)
except self.failureException:
pass
self.assertEqual(None, self.successResultOf(d))
class AssertionNamesTests(unittest.SynchronousTestCase):
"""
Tests for consistency of naming within TestCase assertion methods
"""
def _getAsserts(self):
dct = {}
accumulateMethods(self, dct, 'assert')
return [ dct[k] for k in dct if not k.startswith('Not') and k != '_' ]
def _name(self, x):
return x.__name__
def test_failUnlessMatchesAssert(self):
"""
The C{failUnless*} test methods are a subset of the C{assert*} test
methods. This is intended to ensure that methods using the
I{failUnless} naming scheme are not added without corresponding methods
using the I{assert} naming scheme. The I{assert} naming scheme is
preferred, and new I{assert}-prefixed methods may be added without
corresponding I{failUnless}-prefixed methods.
"""
asserts = set(self._getAsserts())
failUnlesses = set(prefixedMethods(self, 'failUnless'))
self.assertEqual(
failUnlesses, asserts.intersection(failUnlesses))
def test_failIf_matches_assertNot(self):
asserts = prefixedMethods(unittest.SynchronousTestCase, 'assertNot')
failIfs = prefixedMethods(unittest.SynchronousTestCase, 'failIf')
self.assertEqual(sorted(asserts, key=self._name),
sorted(failIfs, key=self._name))
def test_equalSpelling(self):
for name, value in vars(self).items():
if not callable(value):
continue
if name.endswith('Equal'):
self.assertTrue(hasattr(self, name+'s'),
"%s but no %ss" % (name, name))
self.assertEqual(value, getattr(self, name+'s'))
if name.endswith('Equals'):
self.assertTrue(hasattr(self, name[:-1]),
"%s but no %s" % (name, name[:-1]))
self.assertEqual(value, getattr(self, name[:-1]))
class CallDeprecatedTests(unittest.SynchronousTestCase):
"""
Test use of the L{SynchronousTestCase.callDeprecated} method with version objects.
"""
version = Version('Twisted', 8, 0, 0)
def test_callDeprecatedSuppressesWarning(self):
"""
callDeprecated calls a deprecated callable, suppressing the
deprecation warning.
"""
self.callDeprecated(self.version, oldMethod, 'foo')
self.assertEqual(
self.flushWarnings(), [], "No warnings should be shown")
def test_callDeprecatedCallsFunction(self):
"""
L{callDeprecated} actually calls the callable passed to it, and
forwards the result.
"""
result = self.callDeprecated(self.version, oldMethod, 'foo')
self.assertEqual('foo', result)
def test_failsWithoutDeprecation(self):
"""
L{callDeprecated} raises a test failure if the callable is not
deprecated.
"""
def notDeprecated():
pass
exception = self.assertRaises(
self.failureException,
self.callDeprecated, self.version, notDeprecated)
self.assertEqual(
"%r is not deprecated." % notDeprecated, str(exception))
def test_failsWithIncorrectDeprecation(self):
"""
callDeprecated raises a test failure if the callable was deprecated
at a different version to the one expected.
"""
differentVersion = Version('Foo', 1, 2, 3)
exception = self.assertRaises(
self.failureException,
self.callDeprecated,
differentVersion, oldMethod, 'foo')
self.assertIn(getVersionString(self.version), str(exception))
self.assertIn(getVersionString(differentVersion), str(exception))
def test_nestedDeprecation(self):
"""
L{callDeprecated} ignores all deprecations apart from the first.
Multiple warnings are generated when a deprecated function calls
another deprecated function. The first warning is the one generated by
the explicitly called function. That's the warning that we care about.
"""
differentVersion = Version('Foo', 1, 2, 3)
def nestedDeprecation(*args):
return oldMethod(*args)
nestedDeprecation = deprecated(differentVersion)(nestedDeprecation)
self.callDeprecated(differentVersion, nestedDeprecation, 24)
# The oldMethod deprecation should have been emitted too, not captured
# by callDeprecated. Flush it now to make sure it did happen and to
# prevent it from showing up on stdout.
warningsShown = self.flushWarnings()
self.assertEqual(len(warningsShown), 1,
"Unexpected warnings: {}".format(warningsShown))
def test_callDeprecationWithMessage(self):
"""
L{callDeprecated} can take a message argument used to check the warning
emitted.
"""
self.callDeprecated((self.version, "newMethod"),
oldMethodReplaced, 1)
def test_callDeprecationWithWrongMessage(self):
"""
If the message passed to L{callDeprecated} doesn't match,
L{callDeprecated} raises a test failure.
"""
exception = self.assertRaises(
self.failureException,
self.callDeprecated,
(self.version, "something.wrong"),
oldMethodReplaced, 1)
self.assertIn(getVersionString(self.version), str(exception))
self.assertIn("please use newMethod instead", str(exception))
@deprecated(CallDeprecatedTests.version)
def oldMethod(x):
"""
Deprecated method for testing.
"""
return x
@deprecated(CallDeprecatedTests.version, replacement="newMethod")
def oldMethodReplaced(x):
"""
Another deprecated method, which has been deprecated in favor of the
mythical 'newMethod'.
"""
return 2 * x
|
d1faab50e0a21ab258d22df8bde1b091bb73f1f6
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/decoding/GAD/fairseq/logging/metrics.py
|
7b56e31592da6f7362b1fee780071081df43fa28
|
[
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 9,304
|
py
|
metrics.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A standalone module for aggregating metrics.
Metrics can be logged from anywhere using the `log_*` functions defined
in this module. The logged values will be aggregated dynamically based
on the aggregation context in which the logging occurs. See the
:func:`aggregate` context manager for more details.
"""
import contextlib
import time
import uuid
from collections import OrderedDict, defaultdict
from typing import Callable, Dict, List, Optional
from .meters import *
# Aggregation contexts are considered "active" when inside the scope
# created by the :func:`aggregate` context manager.
_aggregators = OrderedDict()
_active_aggregators = OrderedDict()
_active_aggregators_cnt = defaultdict(lambda: 0)
def reset() -> None:
"""Reset all metrics aggregators."""
_aggregators.clear()
_active_aggregators.clear()
_active_aggregators_cnt.clear()
# The "default" aggregator observes all logged values.
_aggregators["default"] = MetersDict()
_active_aggregators["default"] = _aggregators["default"]
_active_aggregators_cnt["default"] = 1
reset()
@contextlib.contextmanager
def aggregate(name: Optional[str] = None, new_root: bool = False):
"""Context manager to aggregate metrics under a given name.
Aggregations can be nested. If *new_root* is ``False``, then logged
metrics will be recorded along the entire stack of nested
aggregators, including a global "default" aggregator. If *new_root*
is ``True``, then this aggregator will be the root of a new
aggregation stack, thus bypassing any parent aggregators.
Note that aggregation contexts are uniquely identified by their
*name* (e.g., train, valid). Creating a context with an existing
name will reuse the corresponding :class:`MetersDict` instance.
If no name is given, then a temporary aggregator will be created.
Usage::
with metrics.aggregate("train"):
for step, batch in enumerate(epoch):
with metrics.aggregate("train_inner") as agg:
metrics.log_scalar("loss", get_loss(batch))
if step % log_interval == 0:
print(agg.get_smoothed_value("loss"))
agg.reset()
print(metrics.get_smoothed_values("train")["loss"])
Args:
name (str): name of the aggregation. Defaults to a
random/temporary name if not given explicitly.
new_root (bool): make this aggregation the root of a new
aggregation stack.
"""
if name is None:
# generate a temporary name
name = str(uuid.uuid4())
assert name not in _aggregators
agg = MetersDict()
else:
assert name != "default"
agg = _aggregators.setdefault(name, MetersDict())
if new_root:
backup_aggregators = _active_aggregators.copy()
_active_aggregators.clear()
backup_aggregators_cnt = _active_aggregators_cnt.copy()
_active_aggregators_cnt.clear()
_active_aggregators[name] = agg
_active_aggregators_cnt[name] += 1
yield agg
_active_aggregators_cnt[name] -= 1
if _active_aggregators_cnt[name] == 0 and name in _active_aggregators:
del _active_aggregators[name]
if new_root:
_active_aggregators.clear()
_active_aggregators.update(backup_aggregators)
_active_aggregators_cnt.clear()
_active_aggregators_cnt.update(backup_aggregators_cnt)
def get_active_aggregators() -> List[MetersDict]:
return list(_active_aggregators.values())
def log_scalar(
key: str,
value: float,
weight: float = 1,
priority: int = 10,
round: Optional[int] = None,
):
"""Log a scalar value.
Args:
key (str): name of the field to log
value (float): value to log
weight (float): weight that this value contributes to the average.
A weight of 0 will always log the latest value.
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, AverageMeter(round=round), priority)
agg[key].update(value, weight)
def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20):
"""Log a scalar value derived from other meters.
Args:
key (str): name of the field to log
fn (Callable[[MetersDict], float]): function that takes a single
argument *meters* and returns the derived value
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, MetersDict._DerivedMeter(fn), priority)
def log_speed(
key: str,
value: float,
priority: int = 30,
round: Optional[int] = None,
):
"""Log the rate of some quantity per second.
Args:
key (str): name of the field to log
value (float): value to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, TimeMeter(round=round), priority)
agg[key].reset() # reset meter on the first call
else:
agg[key].update(value)
def log_start_time(key: str, priority: int = 40, round: Optional[int] = None):
"""Log the duration of some event in seconds.
The duration will be computed once :func:`log_stop_time` is called.
Args:
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, StopwatchMeter(round=round), priority)
agg[key].start()
def log_stop_time(key: str, weight: float = 0.0, prehook=None):
"""Log the duration of some event in seconds.
The duration will be computed since :func:`log_start_time` was called.
Set weight > 0 to report the average time instead of the sum.
Args:
key (str): name of the field to log
weight (float): weight that this time contributes to the average
prehook (function, no arguments): will be called before the timer
is stopped. For example, use prehook=torch.cuda.synchronize to
make sure all gpu operations are done before timer is stopped.
"""
for agg in get_active_aggregators():
if key in agg:
agg[key].stop(weight, prehook)
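# Editor's note: log_start_time and log_stop_time are meant to bracket one
# event under the same key, e.g.:
#     log_start_time("train_wall")
#     ...run a training step...
#     log_stop_time("train_wall")
# With the default weight of 0.0 the meter reports the summed duration, and
# a positive weight switches it to an average (per the docstring above).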
def log_custom(
new_meter_fn: Callable[[], Meter],
key: str,
*args,
priority: int = 50,
**kwargs,
):
"""Log using a custom Meter.
Any extra *args* or *kwargs* will be passed through to the Meter's
*update* method.
Args:
new_meter_fn (Callable[[], Meter]): function that returns a new
Meter instance
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, new_meter_fn(), priority)
agg[key].update(*args, **kwargs)
def reset_meter(name: str, key: str) -> None:
"""Reset Meter instance aggregated under a given *name* and *key*."""
meter = get_meter(name, key)
if meter is not None:
meter.reset()
def reset_meters(name: str) -> None:
"""Reset Meter instances aggregated under a given *name*."""
meters = get_meters(name)
if meters is not None:
meters.reset()
def get_meter(name: str, key: str) -> Optional[Meter]:
"""Get a single Meter instance aggregated under *name* and *key*.
Returns:
Meter or None if no metrics have been logged under *name* and *key*.
"""
if name not in _aggregators:
return None
return _aggregators[name].get(key, None)
def get_meters(name: str) -> Optional[MetersDict]:
"""Get Meter instances aggregated under a given *name*.
Returns:
MetersDict or None if no metrics have been logged under *name*.
"""
return _aggregators.get(name, None)
def get_smoothed_value(name: str, key: str) -> float:
"""Get a single smoothed value.
Raises:
KeyError: if no metrics have been logged under *name* and *key*.
"""
return _aggregators[name].get_smoothed_value(key)
def get_smoothed_values(name: str) -> Dict[str, float]:
"""Get smoothed values aggregated under a given *name*.
Raises:
KeyError: if no metrics have been logged under *name*.
"""
return _aggregators[name].get_smoothed_values()
def state_dict():
    """Serialize the state of all aggregators (e.g. for checkpointing)."""
    return OrderedDict([(name, agg.state_dict()) for name, agg in _aggregators.items()])
def load_state_dict(state_dict):
    """Restore aggregator state previously produced by :func:`state_dict`."""
for name, agg_state in state_dict.items():
_aggregators[name] = MetersDict()
_aggregators[name].load_state_dict(agg_state)
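# --- Editor's sketch (not part of the fairseq API) ---------------------------
# A minimal exercise of the aggregation flow documented in :func:`aggregate`.
# It only runs when the module is executed directly, and still needs the
# fairseq package context because of the relative ``.meters`` import above.
if __name__ == "__main__":
    with aggregate("train"):
        with aggregate("train_inner") as agg:
            log_scalar("loss", 2.5)
            log_scalar("loss", 1.5)
            print(agg.get_smoothed_value("loss"))  # inner average -> 2.0
    print(get_smoothed_values("train")["loss"])  # outer average -> 2.0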
|
8253a6a3f5f89fa1e5f5d597b9a9efdcd347e920
|
1095cfe2e29ddf4e4c5e12d713bd12f45c9b6f7d
|
/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_three_level/l1_cache.py
|
e746579834e7f7a3376489b7e0bb71b5be808552
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
gem5/gem5
|
9ec715ae036c2e08807b5919f114e1d38d189bce
|
48a40cf2f5182a82de360b7efa497d82e06b1631
|
refs/heads/stable
| 2023-09-03T15:56:25.819189
| 2023-08-31T05:53:03
| 2023-08-31T05:53:03
| 27,425,638
| 1,185
| 1,177
|
BSD-3-Clause
| 2023-09-14T08:29:31
| 2014-12-02T09:46:00
|
C++
|
UTF-8
|
Python
| false
| false
| 4,049
|
py
|
l1_cache.py
|
# Copyright (c) 2022 The Regents of the University of California
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .....processors.abstract_core import AbstractCore
from ......isas import ISA
from ......utils.override import *
from m5.objects import (
MessageBuffer,
RubyPrefetcher,
RubyCache,
ClockDomain,
LRURP,
L0Cache_Controller,
)
import math
# L0Cache_Controller is the ruby backend's terminology corresponding to
# L1 cache in stdlib terms.
class L1Cache(L0Cache_Controller):
_version = 0
@classmethod
def versionCount(cls):
cls._version += 1
return cls._version - 1
def __init__(
self,
l1i_size,
l1i_assoc,
l1d_size,
l1d_assoc,
network,
core: AbstractCore,
cache_line_size,
target_isa: ISA,
clk_domain: ClockDomain,
):
super().__init__()
# This is the cache memory object that stores the cache data and tags
self.Icache = RubyCache(
size=l1i_size,
assoc=l1i_assoc,
start_index_bit=self.getBlockSizeBits(cache_line_size),
is_icache=True,
replacement_policy=LRURP(),
)
self.Dcache = RubyCache(
size=l1d_size,
assoc=l1d_assoc,
start_index_bit=self.getBlockSizeBits(cache_line_size),
is_icache=False,
replacement_policy=LRURP(),
)
self.clk_domain = clk_domain
self.prefetcher = RubyPrefetcher()
self.send_evictions = core.requires_send_evicts()
self.transitions_per_cycle = 32
self.enable_prefetch = False
self.request_latency = 2
self.response_latency = 2
self.version = self.versionCount()
self.connectQueues(network)
def getBlockSizeBits(self, cache_line_size):
bits = int(math.log(cache_line_size, 2))
if 2**bits != int(cache_line_size):
raise Exception("Cache line size is not a power of 2!")
return bits
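    # Illustration (editor's note, assumed line size): for a common 64-byte
    # cache line this returns log2(64) = 6, i.e. the low 6 address bits are
    # the block offset and tag/index bits start at bit 6.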
def connectQueues(self, network):
self.prefetchQueue = MessageBuffer()
self.mandatoryQueue = MessageBuffer()
self.optionalQueue = MessageBuffer()
# bufferToL1 and bufferFromL1 are ruby backend terminology.
# In stdlib terms, they are bufferToL2 and bufferFromL2 respectively.
# These buffers are connections between L1 cache and L2 cache.
# Later on, we'll need to connect those buffers to L2.
self.bufferToL1 = MessageBuffer(ordered=True)
self.bufferFromL1 = MessageBuffer(ordered=True)
|
38764c3abc78d9f668f84c48e4cb095f4f4f31e4
|
77044a7d5beabe7dbdaed4cbf0add6f877f3bf3c
|
/python/cross-stack-resources/native-objects/app.py
|
101ed9a63ad70b4d64012241a9a3c8a08c081328
|
[
"Apache-2.0"
] |
permissive
|
aws-samples/aws-cdk-examples
|
1153f27ccbcc3b647e0a3f608a69279ebff1b27c
|
63caf4f3deddc8b00c9d2ae69c6886180143c3ee
|
refs/heads/master
| 2023-09-04T04:49:37.077412
| 2023-09-01T20:48:26
| 2023-09-01T20:48:26
| 168,772,474
| 4,366
| 2,083
|
Apache-2.0
| 2023-09-11T07:57:00
| 2019-02-01T23:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
app.py
|
#!/usr/bin/env python3
from aws_cdk import App
from native_objects.application_stack import ApplicationStack
from native_objects.infrastructure_stack import InfrastructureStack
app = App()
env = {'region': 'us-west-2'}
# Base infrastructure stack, Lambda Functions, DynamoDB Tables, etc....
infra = InfrastructureStack(app, "infrastructure", env=env)
# Application stack that generally changes independently of the underlying infrastructure stack
application = ApplicationStack(app, "application", referenced_function=infra.main_function, env=env)
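# Editor's note: passing infra.main_function across stacks is all the wiring
# needed here; at synth time the CDK emits a CloudFormation Output/Export on
# the "infrastructure" stack and a matching Fn::ImportValue in "application".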
app.synth()
|
e96a75713e7aa4fd7da4ddaa7d0738a9ef411601
|
13cca935063091d7510c3f9636a2ab817edb59e5
|
/opensfm/commands/export_report.py
|
ab6722ee22a7b80eb797c651e9ae66bf8ac0c4b4
|
[
"BSD-3-Clause",
"LGPL-3.0-only",
"HPND",
"BSD-2-Clause"
] |
permissive
|
mapillary/OpenSfM
|
488440b04546fd0dccf661771bf04838602b353e
|
c798da16206aa52395eaef09ea4d7746028479d3
|
refs/heads/main
| 2023-09-05T11:04:10.131418
| 2023-08-30T15:46:11
| 2023-08-30T15:46:11
| 26,531,519
| 3,109
| 914
|
BSD-2-Clause
| 2023-09-13T21:01:17
| 2014-11-12T10:47:29
|
Python
|
UTF-8
|
Python
| false
| false
| 465
|
py
|
export_report.py
|
from . import command
import argparse
from opensfm.dataset import DataSet
from opensfm.actions import export_report
class Command(command.CommandBase):
name = "export_report"
help = "Export a nice report based on previously generated statistics"
def run_impl(self, dataset: DataSet, args: argparse.Namespace) -> None:
export_report.run_dataset(dataset)
def add_arguments_impl(self, parser: argparse.ArgumentParser) -> None:
pass
|
24ddf1c28fda94e0dbfaac00fab945f6de257153
|
7343ece3b82ac87a594865c4074623b45b0297b4
|
/tests/config/test_registration_config.py
|
f6869d7f0645cc3c79d960040526dc19f907c5f1
|
[
"Apache-2.0"
] |
permissive
|
matrix-org/synapse
|
a00111f83310783b78e2996557f8bbae4d9fb229
|
d35bed8369514fe727b4fe1afb68f48cc8b2655a
|
refs/heads/develop
| 2023-09-05T05:24:20.808942
| 2023-09-04T16:14:09
| 2023-09-04T16:14:09
| 22,844,864
| 12,215
| 2,869
|
Apache-2.0
| 2023-09-14T15:20:48
| 2014-08-11T15:51:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,662
|
py
|
test_registration_config.py
|
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synapse.app.homeserver
from synapse.config import ConfigError
from synapse.config.homeserver import HomeServerConfig
from tests.config.utils import ConfigFileTestCase
from tests.utils import default_config
class RegistrationConfigTestCase(ConfigFileTestCase):
def test_session_lifetime_must_not_be_exceeded_by_smaller_lifetimes(self) -> None:
"""
session_lifetime should logically be larger than, or at least as large as,
all the different token lifetimes.
Test that the user is faced with configuration errors if they make it
smaller, as that configuration doesn't make sense.
"""
config_dict = default_config("test")
# First test all the error conditions
with self.assertRaises(ConfigError):
HomeServerConfig().parse_config_dict(
{
"session_lifetime": "30m",
"nonrefreshable_access_token_lifetime": "31m",
**config_dict,
},
"",
"",
)
with self.assertRaises(ConfigError):
HomeServerConfig().parse_config_dict(
{
"session_lifetime": "30m",
"refreshable_access_token_lifetime": "31m",
**config_dict,
},
"",
"",
)
with self.assertRaises(ConfigError):
HomeServerConfig().parse_config_dict(
{
"session_lifetime": "30m",
"refresh_token_lifetime": "31m",
**config_dict,
},
"",
"",
)
# Then test all the fine conditions
HomeServerConfig().parse_config_dict(
{
"session_lifetime": "31m",
"nonrefreshable_access_token_lifetime": "31m",
**config_dict,
},
"",
"",
)
HomeServerConfig().parse_config_dict(
{
"session_lifetime": "31m",
"refreshable_access_token_lifetime": "31m",
**config_dict,
},
"",
"",
)
HomeServerConfig().parse_config_dict(
{"session_lifetime": "31m", "refresh_token_lifetime": "31m", **config_dict},
"",
"",
)
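    # Editor's illustration (values assumed, not from the test data): a
    # homeserver.yaml consistent with the invariant checked above, where
    # session_lifetime is an upper bound on every token lifetime:
    #
    #   session_lifetime: 1d
    #   nonrefreshable_access_token_lifetime: 1h
    #   refreshable_access_token_lifetime: 5m
    #   refresh_token_lifetime: 1d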
def test_refuse_to_start_if_open_registration_and_no_verification(self) -> None:
self.generate_config()
self.add_lines_to_config(
[
" ",
"enable_registration: true",
"registrations_require_3pid: []",
"enable_registration_captcha: false",
"registration_requires_token: false",
]
)
# Test that allowing open registration without verification raises an error
with self.assertRaises(ConfigError):
synapse.app.homeserver.setup(["-c", self.config_file])
|
3c1d97c2010a2c4e5e323818543f99216c858a81
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/stubs/TypingNamedTupleFieldsKeyword.py
|
8ab3695c9a80e4465d8a2205f157bd3c6c3a192a
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
TypingNamedTupleFieldsKeyword.py
|
from typing import NamedTuple
nt = NamedTuple("name", fields=[("x", str), ("y", int)])
|
2968f51ab5e4522fc5d885a4fb5cc32c7bd642f8
|
32b01231af56c01a0ed057222ede46490b1488ce
|
/amplify/ext/mysql/collectors/metrics.py
|
d8b31f8fc3a73c215bc3752ebd0c441f4574d357
|
[
"BSD-2-Clause"
] |
permissive
|
nginxinc/nginx-amplify-agent
|
118478cf17ff3788e5c3d807c0f9a97a584afa65
|
f6be8cf8f8bcc61c549a821bf2aba41b2d843f18
|
refs/heads/master
| 2023-08-24T18:55:44.088497
| 2022-10-25T09:54:50
| 2022-10-25T09:54:50
| 46,153,777
| 312
| 80
|
BSD-2-Clause
| 2022-06-07T16:34:53
| 2015-11-14T00:07:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,948
|
py
|
metrics.py
|
# -*- coding: utf-8 -*-
import time
from amplify.agent.common.context import context
from amplify.agent.collectors.abstract import AbstractMetricsCollector
__author__ = "Andrew Alexeev"
__copyright__ = "Copyright (C) Nginx Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
METRICS = {
'counters': {
'mysql.global.connections': 'Connections',
'mysql.global.questions': 'Questions',
'mysql.global.select': 'Com_select',
'mysql.global.insert': 'Com_insert',
'mysql.global.update': 'Com_update',
'mysql.global.delete': 'Com_delete',
'mysql.global.commit': 'Com_commit',
'mysql.global.slow_queries': 'Slow_queries',
'mysql.global.uptime': 'Uptime',
'mysql.global.aborted_connects': 'Aborted_connects',
'mysql.global.innodb_buffer_pool_read_requests': 'Innodb_buffer_pool_read_requests',
'mysql.global.innodb_buffer_pool_reads': 'Innodb_buffer_pool_reads'
},
'gauges': {
'mysql.global.innodb_buffer_pool_pages_total': 'Innodb_buffer_pool_pages_total',
'mysql.global.innodb_buffer_pool_pages_free': 'Innodb_buffer_pool_pages_free',
'mysql.global.threads_connected': 'Threads_connected',
'mysql.global.threads_running': 'Threads_running'
}
}
REQUIRED_STATUS_FIELDS = list(METRICS['counters'].values()) + list(METRICS['gauges'].values())
class MySQLMetricsCollector(AbstractMetricsCollector):
"""
Metrics collector. Spawned per master.
"""
short_name = 'mysql_metrics'
status_metric_key = 'mysql.status'
def __init__(self, **kwargs):
super(MySQLMetricsCollector, self).__init__(**kwargs)
self.register(
self.mysql_status
)
def mysql_status(self):
"""
Collects data from MySQLd instance
"""
stamp = int(time.time())
# get data
conn = self.object.connect()
result = {}
try:
with conn.cursor() as cursor:
for key in REQUIRED_STATUS_FIELDS:
cursor.execute('SHOW GLOBAL STATUS LIKE "%s";' % key)
row = cursor.fetchone()
result[row[0]] = row[1]
except Exception as e:
exception_name = e.__class__.__name__
context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)
context.log.debug('additional info:', exc_info=True)
finally:
conn.close()
# counters
counted_vars = {}
for metric, variable_name in METRICS['counters'].items():
if variable_name in result:
counted_vars[metric] = int(result[variable_name])
        # compound counter (guarded: if the status query above failed, the
        # component counters are missing and indexing them would raise KeyError)
        if all(m in counted_vars for m in ('mysql.global.insert',
                                           'mysql.global.update',
                                           'mysql.global.delete')):
            counted_vars['mysql.global.writes'] = \
                counted_vars['mysql.global.insert'] + \
                counted_vars['mysql.global.update'] + \
                counted_vars['mysql.global.delete']
self.aggregate_counters(counted_vars, stamp=stamp)
# gauges
tracked_gauges = {}
for metric, variable_name in METRICS['gauges'].items():
if variable_name in result:
tracked_gauges[metric] = {
self.object.definition_hash: int(result[variable_name])
}
# compound gauges
pool_util = 0
if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and
tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):
pool_util = (
(tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -
tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /
tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100
)
tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {
self.object.definition_hash: pool_util
}
hit_ratio = 0
if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and
tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):
hit_ratio = (
(tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /
(tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +
tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100
)
tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {
self.object.definition_hash: hit_ratio
}
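        # Worked example (illustrative numbers): pages_total=1000 and
        # pages_free=250 give pool_util = (1000 - 250) / 1000 * 100 = 75.0;
        # read_requests=900 and reads=100 give
        # hit_ratio = 900 / (900 + 100) * 100 = 90.0.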
self.aggregate_gauges(tracked_gauges, stamp=stamp)
# finalize
self.increment_counters()
self.finalize_gauges()
|
d5688d4f6edc5291807d543df2e1e2ca6ee9b819
|
8a40a3db07eec18178c9b8757aafdb35724ff324
|
/examples/server/sanic/latency.py
|
a451d113b734647154351a7e5b787907f3b5aa6a
|
[
"MIT"
] |
permissive
|
miguelgrinberg/python-engineio
|
52313e7fd2cd740e5a083976231c056d53c9a590
|
35cc5ec0a69b5274697928af4a163e0ca42e1afb
|
refs/heads/main
| 2023-08-18T05:55:37.901376
| 2023-08-15T18:02:04
| 2023-08-15T18:02:04
| 37,830,040
| 236
| 175
|
MIT
| 2023-09-03T15:13:49
| 2015-06-21T23:17:21
|
Python
|
UTF-8
|
Python
| false
| false
| 437
|
py
|
latency.py
|
from sanic import Sanic
from sanic.response import html
import engineio
eio = engineio.AsyncServer(async_mode='sanic')
app = Sanic(name='latency')
eio.attach(app)
@app.route('/')
async def index(request):
with open('latency.html') as f:
return html(f.read())
@eio.on('message')
async def message(sid, data):
await eio.send(sid, 'pong')
app.static('/static', './static')
if __name__ == '__main__':
app.run()
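# Editor's note: running this module starts Sanic on its default
# http://127.0.0.1:8000; opening / serves latency.html, whose client emits
# 'message' events that the handler above answers with 'pong'.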
|
5282746f140a504b8d6d200e8dd1e4badab3a066
|
48ca6f9f041a1e9f563500c8a7fa04dbb18fa949
|
/pygears/sim/extens/svsock.py
|
3e2032390627637a55a136e78944131959242b33
|
[
"MIT"
] |
permissive
|
bogdanvuk/pygears
|
71404e53d4689ec9cdd9db546bfc0f229a7e02da
|
705b11ab6de79868b25753fa9d0ce7128791b346
|
refs/heads/master
| 2023-07-08T11:38:54.625172
| 2022-03-07T12:29:00
| 2022-03-07T12:29:00
| 124,890,922
| 146
| 16
|
MIT
| 2022-08-15T07:57:08
| 2018-03-12T13:10:06
|
Python
|
UTF-8
|
Python
| false
| false
| 7,607
|
py
|
svsock.py
|
import time
import atexit
import tempfile
from math import ceil
import jinja2
import array
import math
import os
import socket
import logging
from pygears.definitions import ROOT_DIR
from subprocess import DEVNULL, Popen
from pygears import reg
from pygears.sim.extens.sim_extend import SimExtend
from pygears.sim import SimPlugin
from pygears.util.fileio import save_file
from pygears.sim import log
from pygears.conf import inject, Inject
CMD_SYS_RESET = 0x80000000
CMD_SET_DATA = 0x40000000
CMD_RESET = 0x20000000
CMD_FORWARD = 0x10000000
CMD_CYCLE = 0x08000000
CMD_READ = 0x04000000
CMD_ACK = 0x02000000
CMD_FINISH = 0x01000000
class CosimulatorStartError(Exception):
pass
class CosimulatorUnavailable(Exception):
pass
def register_exten():
if SVSock not in reg['sim/extens']:
reg['sim/extens'].append(SVSock)
if reg['sim/simulator'] is not None:
SVSock(top=None)
@inject
def register_intf(desc, intfs=Inject('sim/svsock/intfs')):
register_exten()
intfs.append(desc)
return len(intfs) - 1
def u32_repr_gen(data, dtype):
for i in range(ceil(dtype.width / 32)):
yield data & 0xffffffff
data >>= 32
def u32_repr(data, dtype):
return array.array('I', u32_repr_gen(dtype(data).code(), dtype))
def u32_bytes_to_int(data):
arr = array.array('I')
arr.frombytes(data)
val = 0
for val32 in reversed(arr):
val <<= 32
val |= val32
return val
def u32_bytes_decode(data, dtype):
return dtype.decode(u32_bytes_to_int(data) & ((1 << dtype.width) - 1))
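# Worked example (editor's illustration): a 40-bit value 0x1_2345_6789 packs
# into the little-endian 32-bit words [0x23456789, 0x00000001] via
# u32_repr_gen; u32_bytes_to_int folds the words back together in reverse
# order, and u32_bytes_decode masks the result to dtype.width bits before
# handing it to dtype.decode.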
class SVSock(SimExtend):
@inject
def __init__(self, run=Inject('sim/svsock/run'), **kwds):
reg['sim/svsock/server'] = self
self.run_cosim = run
self.kwds = kwds
self.sock = None
self.conn = None
self.cosim_pid = None
super().__init__()
atexit.register(self.finish)
@property
@inject
def outdir(self, outdir=Inject('results-dir')):
return os.path.join(outdir, 'svsock')
@inject
def build(self, intfs=Inject('sim/svsock/intfs')):
base_addr = os.path.dirname(__file__)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(base_addr),
trim_blocks=True,
lstrip_blocks=True)
env.globals.update(zip=zip,
int=int,
print=print,
issubclass=issubclass)
context = {'port': self.port}
for phase in [
'declaration', 'init', 'set_data', 'read', 'ack', 'reset',
'sys_reset'
]:
context[phase] = {}
for i, intf in enumerate(intfs):
if not hasattr(intf, phase):
continue
phase_sv = getattr(intf, phase)()
if phase_sv is not None:
context[phase][i] = phase_sv
res = env.get_template('svsock_top.j2').render(context)
save_file('_top.sv', self.outdir, res)
def sendall(self, pkt):
self.conn.sendall(pkt)
def send_cmd(self, req):
pkt = req.to_bytes(4, byteorder='little')
self.conn.sendall(pkt)
def recv(self, size):
return self.conn.recv(size)
def dtype_send(self, data, dtype):
pkt = u32_repr(data, dtype).tobytes()
self.sendall(pkt)
def dtype_recv(self, dtype):
buff_size = math.ceil(dtype.width / 8)
if buff_size < 4:
buff_size = 4
if buff_size % 4:
buff_size += 4 - (buff_size % 4)
data = self.recv(buff_size)
return u32_bytes_decode(data, dtype)
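    # Example for dtype_recv above (editor's note): dtype.width = 40 gives
    # ceil(40 / 8) = 5 bytes, padded to the next 32-bit boundary -> 8 bytes
    # received, i.e. the same two u32 words that dtype_send would transmit.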
@inject
def invoke_cosim(self, intfs=Inject('sim/svsock/intfs')):
dpi_path = os.path.abspath(os.path.join(ROOT_DIR, 'sim', 'dpi'))
context = {}
context['files'] = [os.path.join(dpi_path, 'sock.sv')]
context['includes'] = []
for param in ['files', 'includes']:
for i, intf in enumerate(intfs):
if not hasattr(intf, param):
continue
param_val = getattr(intf, param)()
if param_val is not None:
context[param].extend(param_val)
context['includes'].extend([dpi_path, self.outdir])
context['files'].extend([os.path.join(self.outdir, '_top.sv')])
if not reg['sim/svsock/backend']:
raise CosimulatorStartError('No registered cosimulators')
cosim_pid = None
for b in reg['sim/svsock/backend'].values():
try:
cosim_pid = b(outdir=self.outdir,
files=context['files'],
includes=context['includes'],
makefile=not self.run_cosim)
except CosimulatorUnavailable:
pass
else:
break
else:
raise CosimulatorStartError(
f'No available cosimulator executables found for any of the plugins: '
f'{",".join(reg["sim/svsock/backend"].keys())}')
return cosim_pid
def before_run(self, sim):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if self.run_cosim:
import uuid
filename = str(uuid.uuid4())
else:
filename = "svsock.s"
self.port = os.path.join(tempfile.gettempdir(), filename)
if os.path.exists(self.port):
os.remove(self.port)
self.sock.bind(self.port)
# Listen for incoming connections
# self.sock.listen(len(self.gear.in_ports) + len(self.gear.out_ports))
self.sock.listen(1)
self.build()
# if self.rebuild:
# self.build()
# else:
# self.kwds['nobuild'] = True
if not self.run_cosim:
self.invoke_cosim()
self.conn, addr = self.sock.accept()
else:
self.sock.settimeout(5)
self.cosim_pid = self.invoke_cosim()
ret = None
while ret is None:
try:
self.conn, addr = self.sock.accept()
break
except socket.timeout:
ret = self.cosim_pid.poll()
if ret is not None:
log.error(
                            f'Cosimulator error: {ret}. Check the log file "{self.outdir}/log.log"'
)
raise CosimulatorStartError
msg = self.conn.recv(1024)
port_name = msg.decode()
log.debug(f"Connection received for {port_name}")
def finish(self):
if self.sock:
if self.conn:
try:
self.send_cmd(CMD_FINISH)
time.sleep(0.5)
except BrokenPipeError:
pass
            log.info('Done. Closing the socket...')
self.sock.close()
time.sleep(1)
if self.cosim_pid is not None:
self.cosim_pid.terminate()
self.sock = None
self.cosim_pid = None
self.conn = None
atexit.unregister(self.finish)
def after_cleanup(self, sim):
self.finish()
class SVSockPlugin(SimPlugin):
@classmethod
def bind(cls):
reg.confdef('sim/svsock/backend', default={})
reg.confdef('sim/svsock/run', default=True)
reg['sim/svsock/intfs'] = []
reg['sim/svsock/server'] = None
|
b0e1134a7a8ec5906f1727fdfd30fa75cd39acc0
|
961c62d9dc6c9699a9c9ce4f320447cf6441d108
|
/src/human_aware_rl/human/tests.py
|
9870d8e1b59e6f19b2f51e0ad281e61e87ee1245
|
[
"MIT"
] |
permissive
|
HumanCompatibleAI/overcooked_ai
|
e6defb38c82e37b79d918318ea5ec07d9ab7d266
|
6bde8b27b5a1dcdba571e8f53d98c6fc836eca8c
|
refs/heads/master
| 2023-07-20T17:18:53.025742
| 2023-07-11T23:36:49
| 2023-07-11T23:36:49
| 193,407,023
| 510
| 119
|
MIT
| 2023-07-11T23:36:51
| 2019-06-23T23:58:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,837
|
py
|
tests.py
|
import copy
import os
import pickle
import shutil
import sys
import unittest
import numpy as np
from numpy.testing._private.utils import assert_raises
from human_aware_rl.human.process_dataframes import (
csv_to_df_pickle,
get_trajs_from_data,
)
from human_aware_rl.human.process_human_trials import (
main as process_human_trials_main,
)
from human_aware_rl.static import *
from human_aware_rl.utils import equal_dicts
from overcooked_ai_py.agents.agent import AgentPair, GreedyHumanModel
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import (
OvercookedGridworld,
OvercookedState,
)
from overcooked_ai_py.planning.planners import (
NO_COUNTERS_PARAMS,
MediumLevelActionManager,
)
class TestProcessDataFrames(unittest.TestCase):
temp_data_dir = "this_is_a_temp"
data_len_2019 = 3546
data_len_2020 = 1189
base_csv_to_df_params = {
"csv_path": DUMMY_2020_RAW_HUMAN_DATA_PATH,
"out_dir": "this_is_a_temp",
"out_file_prefix": "unittest",
"button_presses_threshold": 0.25,
"perform_train_test_split": False,
"silent": True,
}
base_get_trajs_from_data_params = {
"data_path": DUMMY_2019_CLEAN_HUMAN_DATA_PATH,
"featurize_states": False,
"check_trajectories": False,
"silent": True,
"layouts": ["cramped_room"],
}
def setUp(self):
print(
"\nIn Class {}, in Method {}".format(
self.__class__.__name__, self._testMethodName
)
)
if not os.path.exists(self.temp_data_dir):
os.makedirs(self.temp_data_dir)
def tearDown(self):
shutil.rmtree(self.temp_data_dir)
def test_csv_to_df_pickle_2019(self):
params = copy.deepcopy(self.base_csv_to_df_params)
params["csv_path"] = DUMMY_2019_RAW_HUMAN_DATA_PATH
params["button_presses_threshold"] = 0.0
data = csv_to_df_pickle(**params)
self.assertEqual(len(data), self.data_len_2019)
params = copy.deepcopy(self.base_csv_to_df_params)
params["csv_path"] = DUMMY_2019_RAW_HUMAN_DATA_PATH
params["button_presses_threshold"] = 0.7
data = csv_to_df_pickle(**params)
self.assertLess(len(data), self.data_len_2019)
def test_csv_to_df_pickle_2020(self):
params = copy.deepcopy(self.base_csv_to_df_params)
params["button_presses_threshold"] = 0.0
data = csv_to_df_pickle(**params)
self.assertEqual(len(data), self.data_len_2020)
params = copy.deepcopy(self.base_csv_to_df_params)
params["button_presses_threshold"] = 0.7
data = csv_to_df_pickle(**params)
self.assertLess(len(data), self.data_len_2020)
def test_csv_to_df_pickle(self):
# Try various button thresholds (hand-picked to lie between different values for dummy data games)
button_thresholds = [0.2, 0.6, 0.7]
lengths = []
for threshold in button_thresholds:
            # dummy dataset is too small to partition, so we set train_test_split=False
params = copy.deepcopy(self.base_csv_to_df_params)
params["button_presses_threshold"] = threshold
data = csv_to_df_pickle(**params)
lengths.append(len(data))
        # Filtered data size should be monotonically non-increasing w.r.t. button_presses_threshold
for i in range(len(lengths) - 1):
self.assertGreaterEqual(lengths[i], lengths[i + 1])
        # Picking a sufficiently high threshold discards all data, which should raise a ValueError
params = copy.deepcopy(self.base_csv_to_df_params)
params["button_presses_threshold"] = 0.8
self.assertRaises(ValueError, csv_to_df_pickle, **params)
def test_get_trajs_from_data_2019(self):
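        # Smoke test: loading 2019 trajectories should simply not raise.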
params = copy.deepcopy(self.base_get_trajs_from_data_params)
trajectories, _ = get_trajs_from_data(**params)
def test_get_trajs_from_data_2019_featurize(self):
params = copy.deepcopy(self.base_get_trajs_from_data_params)
params["featurize_states"] = True
trajectories, _ = get_trajs_from_data(**params)
def test_get_trajs_from_data_2020(self):
        # Ensure we can properly deserialize states with updated objects (i.e. tomatoes)
params = copy.deepcopy(self.base_get_trajs_from_data_params)
params["layouts"] = ["inverse_marshmallow_experiment"]
params["data_path"] = DUMMY_2020_CLEAN_HUMAN_DATA_PATH
trajectories, _ = get_trajs_from_data(**params)
def test_get_trajs_from_data_2020_featurize(self):
        # Ensure we can properly featurize states with updated dynamics and updated objects (i.e. tomatoes)
params = copy.deepcopy(self.base_get_trajs_from_data_params)
params["layouts"] = ["inverse_marshmallow_experiment"]
params["data_path"] = DUMMY_2020_CLEAN_HUMAN_DATA_PATH
params["featurize_states"] = True
trajectories, _ = get_trajs_from_data(**params)
def test_csv_to_df_to_trajs_integration(self):
# Ensure the output of 'csv_to_df_pickle' works as valid input to 'get_trajs_from_data'
params = copy.deepcopy(self.base_csv_to_df_params)
_ = csv_to_df_pickle(**params)
params = copy.deepcopy(self.base_get_trajs_from_data_params)
params["data_path"] = os.path.join(
self.temp_data_dir, "unittest_all.pickle"
)
params["layouts"] = ["inverse_marshmallow_experiment"]
_ = get_trajs_from_data(**params)
class TestHumanDataConversion(unittest.TestCase):
temp_dir = "this_is_also_a_temp"
infile = DUMMY_2019_CLEAN_HUMAN_DATA_PATH
horizon = 400
DATA_TYPE = "train"
layout_name = "cramped_room"
def _equal_pickle_and_env_state_dict(
self, pickle_state_dict, env_state_dict
):
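        # Compare states while ignoring keys that legitimately differ
        # between a replayed env and the pickled data (timestep and
        # order bookkeeping).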
return equal_dicts(
pickle_state_dict,
env_state_dict,
["timestep", "all_orders", "bonus_orders"],
)
def setUp(self):
print(
"\nIn Class {}, in Method {}".format(
self.__class__.__name__, self._testMethodName
)
)
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
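        # Build the gridworld, planner, and environment that the replay
        # test below steps through.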
self.base_mdp = OvercookedGridworld.from_layout_name(self.layout_name)
self.mlam = MediumLevelActionManager.from_pickle_or_compute(
self.base_mdp, NO_COUNTERS_PARAMS, force_compute=True, info=False
)
self.env = OvercookedEnv.from_mdp(
self.base_mdp, horizon=self.horizon, info_level=0
)
self.starting_state_dict = (
self.base_mdp.get_standard_start_state().to_dict()
)
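        # Convert the cleaned dummy data into per-layout lists of
        # (state_dict, joint_action) pairs consumed by test_state.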
outfile = process_human_trials_main(
self.infile,
self.temp_dir,
insert_interacts=True,
verbose=False,
forward_port=False,
fix_json=False,
)
with open(outfile, "rb") as f:
self.human_data = pickle.load(f)[self.layout_name]
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_state(self):
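        # Replay the first 100 recorded joint actions through a fresh env.
        # A recorded start state marks the beginning of a new game, so the
        # env is reset there; every other recorded state must match the
        # env state (modulo the ignored bookkeeping keys).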
for state_dict, joint_action in self.human_data[:100]:
if state_dict.items() == self.starting_state_dict.items():
self.env.reset()
else:
self.assertTrue(
self._equal_pickle_and_env_state_dict(
state_dict, self.env.state.to_dict()
),
"Expected state:\t\n{}\n\nActual state:\t\n{}".format(
self.env.state.to_dict(), state_dict
),
)
self.env.step(joint_action=joint_action)
if __name__ == "__main__":
unittest.main()
|