id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
298,600 | test missing file | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the partial-object-validate command."""
import pathlib
import sys
from _pytest.monkeypatch import MonkeyPatch
import pytest
import tests.test_utils as test_utils
from trestle.cli import Trestle
from trestle.common.err import TrestleError
from trestle.core.commands.partial_object_validate import PartialObjectValidate
benchmark_args = ['sample_file', 'element_path', 'rc']
benchmark_values = [
(pathlib.Path('json/minimal_catalog.json'), 'catalog', 0),
(pathlib.Path('split_merge/load_distributed/groups.json'), 'catalog.groups', 0),
(pathlib.Path('split_merge/load_distributed/groups.json'), 'catalog.groups.group.groups', 0)
]
bechmark_values_failed = [
(pathlib.Path('json/minimal_catalog.json'), 'catalog.metadata', 4),
(pathlib.Path('split_merge/load_distributed/groups.json'), 'catalog.groups.group', 4),
(pathlib.Path('json/minimal_catalog_missing_uuid.json'), 'catalog', 4),
(pathlib.Path('json/minimal_catalog.json'), 'catalogs', 4)
]
def METHOD_NAME(tmp_path: pathlib.Path, monkeypatch: MonkeyPatch) -> None:
    """Verify the CLI returns rc 1 when the target file does not exist."""
    # tmp_path is empty, so this catalog file is guaranteed to be absent.
    missing_catalog = tmp_path / 'catalog.json'
    argv = f'trestle partial-object-validate -f {missing_catalog} -e catalog'.split()
    monkeypatch.setattr(sys, 'argv', argv)
    assert Trestle().run() == 1
@pytest.mark.parametrize(benchmark_args, benchmark_values)
def test_partial_object_validate(
    sample_file: pathlib.Path, element_path: str, rc: int, testdata_dir: pathlib.Path
) -> None:
    """Test partial object validation succeeds for valid file / element-path pairs."""
    # Resolve the sample file relative to the shared test-data directory fixture.
    full_path = testdata_dir / sample_file
    actual_rc = PartialObjectValidate.partial_object_validate(full_path, element_path)
    # Each parametrized case declares its expected return code (0 == valid).
    assert rc == actual_rc
@pytest.mark.parametrize(benchmark_args, bechmark_values_failed)
def test_partial_object_validate_fail(
    sample_file: pathlib.Path, element_path: str, rc: int, testdata_dir: pathlib.Path
) -> None:
    """Test that invalid file / element-path combinations raise TrestleError."""
    full_path = testdata_dir / sample_file
    # The parametrized rc is unused here: calling the API directly surfaces
    # failure as an exception rather than a return code.
    with pytest.raises(TrestleError):
        PartialObjectValidate.partial_object_validate(full_path, element_path)
@pytest.mark.parametrize(benchmark_args, benchmark_values)
def test_cli(
    sample_file: str, element_path: str, rc: int, testdata_dir: pathlib.Path, monkeypatch: MonkeyPatch
) -> None:
    """Exercise partial-object-validate end to end through the CLI."""
    target = testdata_dir / sample_file
    argv = f'trestle partial-object-validate -f {target} -e {element_path}'.split()
    monkeypatch.setattr(sys, 'argv', argv)
    # The CLI return code must match the parametrized expectation.
    assert Trestle().run() == rc
def test_for_failure_on_multiple_element_paths(testdata_dir: pathlib.Path, monkeypatch: MonkeyPatch) -> None:
    """Test whether a bad element string correctly errors."""
    # The embedded single quotes keep this a single argv token containing a
    # comma, which the command must reject as an invalid element path.
    element_str = "'catalogs,profile'"
    full_path = testdata_dir / 'json/minimal_catalog.json'
    command_str = f'trestle partial-object-validate -f {str(full_path)} -e {element_str}'
    monkeypatch.setattr(sys, 'argv', command_str.split())
    rc = Trestle().run()
    # Any non-zero return code counts as correct rejection.
    assert rc > 0
def test_handling_unexpected_exception(testdata_dir: pathlib.Path, monkeypatch: MonkeyPatch) -> None:
    """Test that an unexpected exception during validation maps to a non-zero rc."""
    element_str = 'catalog'
    full_path = testdata_dir / 'json/minimal_catalog.json'
    command_str = f'trestle partial-object-validate -f {str(full_path)} -e {element_str}'
    monkeypatch.setattr(sys, 'argv', command_str.split())
    # Force the underlying validator to raise so the CLI's generic
    # error-handling path is exercised.
    monkeypatch.setattr(
        'trestle.core.commands.partial_object_validate.PartialObjectValidate.partial_object_validate',
        test_utils.patch_raise_exception
    )
    rc = Trestle().run()
    assert rc > 0
298,601 | nodes in cell | #-------------------------------------------------------------------------------
# NestedGridNeighbor
#-------------------------------------------------------------------------------
from PYB11Generator import *
from Neighbor import *
from NeighborAbstractMethods import *
@PYB11template("Dimension")
class NestedGridNeighbor(Neighbor):
    # PYB11 binding specification: method bodies only declare argument and
    # return types, and the bare-string docstrings become the pybind11 doc
    # strings in the generated module — so no docstrings are added or changed
    # here (that would alter the generated bindings).

    # C++ typedefs injected into every generated method for this template.
    PYB11typedefs = """
    typedef typename %(Dimension)s::Scalar Scalar;
    typedef typename %(Dimension)s::Vector Vector;
    typedef typename %(Dimension)s::Tensor Tensor;
    typedef typename %(Dimension)s::SymTensor SymTensor;
    typedef NodeList<%(Dimension)s> NodeListType;
    typedef GridCellIndex<%(Dimension)s> GridCellIndexType;
    typedef GeomPlane<%(Dimension)s> Plane;
"""

    #...........................................................................
    # Constructors
    def pyinit(self,
               nodeList = "NodeListType&",
               searchType = ("const NeighborSearchType", "NeighborSearchType::GatherScatter"),
               numGridLevels = ("int", "31"),
               topGridCellSize = ("double", "100.0"),
               origin = ("Vector", "Vector::zero"),
               kernelExtent = ("const double", "2.0"),
               gridCellInfluenceRadius = ("int", "1")):
        "Construct a NestedGridNeighbor"

    #...........................................................................
    # Methods
    @PYB11const
    def gridLevel(self, nodeID="const int"):
        "Find the gridlevel for the given nodeID"
        return "int"

    # Overload of gridLevel keyed on the H tensor.
    @PYB11pycppname("gridLevel")
    @PYB11const
    def gridLevel1(self, H="const SymTensor&"):
        "Find the gridlevel for the given H"
        return "int"

    # Overload of gridLevel keyed on a scalar H.
    @PYB11pycppname("gridLevel")
    @PYB11const
    def gridLevel2(self, H="const Scalar&"):
        "Find the gridlevel for the given H"
        return "int"

    @PYB11const
    def gridCellIndex(self, nodeID="const int", gridLevel="const int"):
        "Find the GridCellIndex for the given node on the given level"
        return "GridCellIndexType"

    # Overload of gridCellIndex keyed on a position vector.
    @PYB11pycppname("gridCellIndex")
    @PYB11const
    def gridCellIndex1(self, position="const Vector&", gridLevel="const int"):
        "Find the GridCellIndex for the given position on the given level"
        return "GridCellIndexType"

    # NOTE(review): the methods below give no explicit arguments or return
    # type — presumably bound from the C++ declarations; confirm before
    # adding signatures.
    def translateGridCellRange(self):
        return

    def cellOccupied(self):
        "Test if the given (grid cell, grid level) is occupied"
        return

    @PYB11returnpolicy("reference_internal")
    @PYB11const
    def occupiedGridCells(self):
        "The full set of occupied gridcells on all gridlevels"
        return "const std::vector<std::vector<GridCellIndexType>>&"

    # Overload restricted to a single grid level.
    @PYB11returnpolicy("reference_internal")
    @PYB11pycppname("occupiedGridCells")
    @PYB11const
    def occupiedGridCells1(self, gridLevel="const int"):
        "The set of occupied gridcells on the given gridlevel"
        return "const std::vector<GridCellIndexType>&"

    def headOfGridCell(self):
        "Return the head of the chain for (grid cell, grid level)"

    def nextNodeInCell(self):
        "Find the next node in the chain from a given node"

    def internalNodesInCell(self):
        "Return a list of the internal nodes in the given (grid cell, grid level)"

    def METHOD_NAME(self):
        "Return a list of the nodes in the given (grid cell, grid level)"

    def appendNodesInCell(self):
        "Add to the chain of nodes for a given (grid cell, grid level)"

    def occupiedGridCellsInRange(self):
        "Find the occupied grid cells given (min, max) cells and grid level"

    def gridNormal(self):
        "Convert a coordinate vector to an integer normal"

    def mapGridCell(self):
        "Map a (grid cell, grid level) through a pair of planes"

    @PYB11const
    def setNestedMasterList(self,
                            gridCell = "const GridCellIndexType&",
                            gridLevel = "const int",
                            masterList = "std::vector<int>&",
                            coarseNeighbors = "std::vector<int>&",
                            ghostConnectivity = "const bool"):
        "Worker method used to set master/coarse information"
        return "void"

    def findNestedNeighbors(self):
        "Return the neighbors for the given (grid cell, grid level)"

    @PYB11virtual
    @PYB11const
    def valid(self):
        "Test if the Neighbor is valid, i.e., ready to be queried for connectivity information."
        return "bool"

    #...........................................................................
    # Properties
    numGridLevels = PYB11property("int", "numGridLevels", "numGridLevels", doc="The maximum number of grid levels allowed")
    numOccupiedGridLevels = PYB11property("int", "numOccupiedGridLevels", doc="The number of grid levels populated by nodes")
    occupiedGridLevels = PYB11property("std::vector<int>", "occupiedGridLevels", doc="Array of the occupied grid levels")
    origin = PYB11property("const Vector&", "origin", "origin", doc="The origin for computing the GridCellIndex of a coordinate Vector")
    topGridSize = PYB11property("double", "topGridSize", "topGridSize", doc="The cell size on the coarsest (top) grid level")
    gridCellInfluenceRadius = PYB11property("int", "gridCellInfluenceRadius", "gridCellInfluenceRadius", doc="The radius in grid cells on a level a cell can interact with")
    gridCellSizeInv = PYB11property("const std::vector<double>&", "gridCellSizeInv", doc="The array of 1/grid cell size for each level")
    nodeInCell = PYB11property("const std::vector<std::vector<GridCellIndexType>>&", "nodeInCell", doc="The cell each node is in")
    #masterGridLevel = PYB11property("int", "masterGridLevel", doc="The current master grid level")
    #masterGridCellIndex = PYB11property("const GridCellIndexType&", "masterGridCellIndex", doc="The current master grid cell index")
    endOfLinkList = PYB11property("int", "endOfLinkList", doc="Value used to terminate a link list chain")

#-------------------------------------------------------------------------------
# Add the virtual interface
#-------------------------------------------------------------------------------
PYB11inject(NeighborAbstractMethods, NestedGridNeighbor, virtual=True)
298,602 | db validate func | from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, Optional
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.full_node.block_store import BlockRecordDB
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.full_block import FullBlock
from chia.util.config import load_config
from chia.util.path import path_from_root
def METHOD_NAME(
    root_path: Path,
    in_db_path: Optional[Path] = None,
    *,
    validate_blocks: bool,
) -> None:
    """Resolve the blockchain DB path (if not given) and run v2 validation."""
    if in_db_path is None:
        # Derive the DB location from the full-node config, substituting the
        # selected network name into the configured path pattern.
        full_node_config: Dict[str, Any] = load_config(root_path, "config.yaml")["full_node"]
        network: str = full_node_config["selected_network"]
        resolved_pattern: str = full_node_config["database_path"].replace("CHALLENGE", network)
        in_db_path = path_from_root(root_path, resolved_pattern)
    validate_v2(in_db_path, validate_blocks=validate_blocks)
    print(f"\n\nDATABASE IS VALID: {in_db_path}\n")
def validate_v2(in_path: Path, *, validate_blocks: bool) -> None:
    """Validate a v2 chia blockchain database.

    Walks the chain backwards from the stored peak, checking that every main
    chain block exists exactly once, that in_main_chain flags are consistent,
    and (optionally, with validate_blocks=True) that each stored block blob
    and block record match their recorded hashes and heights.  Raises
    RuntimeError on the first inconsistency found.
    """
    import sqlite3
    from contextlib import closing

    import zstd

    if not in_path.exists():
        print(f"input file doesn't exist. {in_path}")
        raise RuntimeError(f"can't find {in_path}")

    print(f"opening file for reading: {in_path}")
    with closing(sqlite3.connect(in_path)) as in_db:
        # read the database version
        try:
            with closing(in_db.execute("SELECT * FROM database_version")) as cursor:
                row = cursor.fetchone()
                if row is None or row == []:
                    raise RuntimeError("Database is missing version field")
                if row[0] != 2:
                    raise RuntimeError(f"Database has the wrong version ({row[0]} expected 2)")
        except sqlite3.OperationalError:
            raise RuntimeError("Database is missing version table")

        # the current peak anchors the traversal below
        try:
            with closing(in_db.execute("SELECT hash FROM current_peak WHERE key = 0")) as cursor:
                row = cursor.fetchone()
                if row is None or row == []:
                    raise RuntimeError("Database is missing current_peak field")
                peak = bytes32(row[0])
        except sqlite3.OperationalError:
            raise RuntimeError("Database is missing current_peak table")

        print(f"peak hash: {peak}")

        with closing(in_db.execute("SELECT height FROM full_blocks WHERE header_hash = ?", (peak,))) as cursor:
            peak_row = cursor.fetchone()
            if peak_row is None or peak_row == []:
                raise RuntimeError("Database is missing the peak block")
            peak_height = peak_row[0]

        print(f"peak height: {peak_height}")

        print("traversing the full chain")

        current_height = peak_height
        # we're looking for a block with this hash
        expect_hash = peak
        # once we find it, we know what the next block to look for is, which
        # this is set to
        next_hash = None

        num_orphans = 0
        # flat buffer of 32-byte hashes indexed by height.
        # NOTE(review): sized for heights [0, peak_height); the peak entry is
        # written via slice assignment which extends the bytearray — confirm
        # this is intended.
        height_to_hash = bytearray(peak_height * 32)

        with closing(
            in_db.execute(
                f"SELECT header_hash, prev_hash, height, in_main_chain"
                f"{', block, block_record' if validate_blocks else ''} "
                "FROM full_blocks ORDER BY height DESC"
            )
        ) as cursor:
            for row in cursor:
                hh = row[0]
                prev = row[1]
                height = row[2]
                in_main_chain = row[3]

                # if there are blocks being added to the database, just ignore
                # the ones added since we picked the peak
                if height > peak_height:
                    continue

                if validate_blocks:
                    # decompress the full block blob and cross-check it and
                    # the serialized block record against the table columns
                    block = FullBlock.from_bytes(zstd.decompress(row[4]))
                    block_record: BlockRecordDB = BlockRecordDB.from_bytes(row[5])
                    actual_header_hash = block.header_hash
                    actual_prev_hash = block.prev_header_hash
                    if actual_header_hash != hh:
                        raise RuntimeError(
                            f"Block {hh.hex()} has a blob with mismatching hash: {actual_header_hash.hex()}"
                        )
                    if block_record.header_hash != hh:
                        raise RuntimeError(
                            f"Block {hh.hex()} has a block record with mismatching "
                            f"hash: {block_record.header_hash.hex()}"
                        )
                    if block_record.total_iters != block.total_iters:
                        raise RuntimeError(
                            f"Block {hh.hex()} has a block record with mismatching total "
                            f"iters: {block_record.total_iters} expected {block.total_iters}"
                        )
                    if block_record.prev_hash != actual_prev_hash:
                        raise RuntimeError(
                            f"Block {hh.hex()} has a block record with mismatching "
                            f"prev_hash: {block_record.prev_hash} expected {actual_prev_hash.hex()}"
                        )
                    if block.height != height:
                        raise RuntimeError(
                            f"Block {hh.hex()} has a mismatching height: {block.height} expected {height}"
                        )

                if height != current_height:
                    # we're moving to the next level. Make sure we found the block
                    # we were looking for at the previous level
                    if next_hash is None:
                        raise RuntimeError(
                            f"Database is missing the block with hash {expect_hash} at height {current_height}"
                        )
                    expect_hash = next_hash
                    next_hash = None
                    current_height = height

                if hh == expect_hash:
                    # this row is the main-chain block we were looking for
                    if next_hash is not None:
                        raise RuntimeError(f"Database has multiple blocks with hash {hh.hex()}, at height {height}")
                    if not in_main_chain:
                        raise RuntimeError(
                            f"block {hh.hex()} (height: {height}) is part of the main chain, "
                            f"but in_main_chain is not set"
                        )

                    if validate_blocks:
                        if actual_prev_hash != prev:
                            raise RuntimeError(
                                f"Block {hh.hex()} has a blob with mismatching "
                                f"prev-hash: {actual_prev_hash}, expected {prev}"
                            )

                    next_hash = prev

                    height_to_hash[height * 32 : height * 32 + 32] = hh

                    print(f"\r{height} orphaned blocks: {num_orphans} ", end="")
                else:
                    # any other row at this height is an orphan
                    if in_main_chain:
                        raise RuntimeError(f"block {hh.hex()} (height: {height}) is orphaned, but in_main_chain is set")
                    num_orphans += 1

        print("")

        if current_height != 0:
            raise RuntimeError(f"Database is missing blocks below height {current_height}")

        # make sure the prev_hash pointer of block height 0 is the genesis
        # challenge
        if next_hash != DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA:
            raise RuntimeError(
                f"Blockchain has invalid genesis challenge {next_hash}, expected "
                f"{DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA.hex()}"
            )

        if num_orphans > 0:
            print(f"{num_orphans} orphaned blocks")
298,603 | reference result | #!/usr/bin/env python3
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import numpy as np
import argparse
import scipy
import random
import dace
from dace.fpga_testing import fpga_test
from dace.memlet import Memlet
import dace.libraries.blas as blas
from dace.transformation.interstate import FPGATransformSDFG, InlineSDFG
from dace.transformation.dataflow import StreamingMemory
from dace.libraries.standard.memory import aligned_ndarray
def run_test(configs, target):
    """Run axpy for each (alpha, veclen, dtype) config against a target.

    Builds the SDFG for the requested target ("pure", "fpga_array" or
    "fpga_stream"), runs it on random inputs, and compares against the BLAS
    reference.  Raises ValueError on validation failure; returns the last
    compiled SDFG (consumed by the @fpga_test harness).
    """
    n = int(1 << 13)
    for i, config in enumerate(configs):
        a, veclen, dtype = config
        x = aligned_ndarray(np.random.uniform(0, 100, n).astype(dtype.type), alignment=256)
        y = aligned_ndarray(np.random.uniform(0, 100, n).astype(dtype.type), alignment=256)
        y_ref = y.copy()
        a = dtype(a)
        ref_result = METHOD_NAME(x, y_ref, a)
        if target == "fpga_stream":
            sdfg = stream_fpga_graph(veclen, dtype, "fpga", i)
        elif target == "fpga_array":
            sdfg = fpga_graph(veclen, dtype, "fpga", i)
        else:
            sdfg = pure_graph(veclen, dtype, "pure", i)
        program = sdfg.compile()
        with dace.config.set_temporary('compiler', 'allow_view_arguments', value=True):
            # All targets take identical arguments, so the former per-target
            # branches (with byte-identical bodies) are collapsed here.
            program(x=x, y=y, a=a, n=np.int32(n))
            ref_norm = np.linalg.norm(y - ref_result) / n
        if ref_norm >= 1e-5:
            raise ValueError(f"Failed validation for target {target}.")
    return sdfg
def METHOD_NAME(x_in, y_in, alpha):
    """Return the BLAS reference result alpha * x_in + y_in.

    Selects the precision-matched BLAS routine (saxpy for float32, daxpy for
    float64) instead of hard-coding single-precision saxpy, so float64
    configs are not silently compared against a float32 reference.
    """
    axpy = scipy.linalg.blas.get_blas_funcs("axpy", (x_in, y_in))
    return axpy(x_in, y_in, a=alpha)
def pure_graph(veclen, dtype, implementation, test_case):
    """Build an SDFG wrapping a BLAS Axpy library node.

    veclen controls vectorization width, dtype the element type,
    implementation the library-node expansion to use, and test_case is only
    used to make the SDFG name unique per parametrized run.
    """
    n = dace.symbol("n")
    a = dace.symbol("a")
    sdfg_name = f"axpy_test_{implementation}_{test_case}_w{veclen}"
    sdfg = dace.SDFG(sdfg_name)
    test_state = sdfg.add_state("test_state")
    # x and y are stored as vectors of width veclen, hence the n/veclen shape.
    vtype = dace.vector(dtype, veclen)
    sdfg.add_symbol(a.name, dtype)
    sdfg.add_array("x", shape=[n / veclen], dtype=vtype)
    sdfg.add_array("y", shape=[n / veclen], dtype=vtype)
    x_in = test_state.add_read("x")
    y_in = test_state.add_read("y")
    # y is both an input and the output (axpy updates y in place).
    y_out = test_state.add_write("y")
    axpy_node = blas.axpy.Axpy("axpy", a)
    axpy_node.implementation = implementation
    test_state.add_memlet_path(x_in, axpy_node, dst_conn="_x", memlet=Memlet(f"x[0:n/{veclen}]"))
    test_state.add_memlet_path(y_in, axpy_node, dst_conn="_y", memlet=Memlet(f"y[0:n/{veclen}]"))
    test_state.add_memlet_path(axpy_node, y_out, src_conn="_res", memlet=Memlet(f"y[0:n/{veclen}]"))
    sdfg.expand_library_nodes()
    return sdfg
def test_pure():
    """Validate the pure (non-FPGA) expansion of axpy."""
    run_test([(0.5, 1, dace.float32), (1.0, 4, dace.float64)], "pure")
def fpga_graph(veclen, dtype, test_case, expansion):
    """Build the axpy SDFG and lower it to an FPGA kernel."""
    # NOTE(review): the last two parameters are forwarded to pure_graph as
    # (implementation, test_case) positionally — the local names here look
    # shifted; confirm against callers (run_test passes "fpga", i) before
    # renaming.
    sdfg = pure_graph(veclen, dtype, test_case, expansion)
    sdfg.apply_transformations_repeated([FPGATransformSDFG, InlineSDFG])
    return sdfg
def stream_fpga_graph(veclen, precision, test_case, expansion):
    """Build the FPGA axpy SDFG with streaming (FPGA-local) memory accesses."""
    sdfg = fpga_graph(veclen, precision, test_case, expansion)
    sdfg.expand_library_nodes()
    # Convert memory accesses to streams where possible, placing them in
    # FPGA-local storage.
    sdfg.apply_transformations_repeated([InlineSDFG, StreamingMemory], [{}, {"storage": dace.StorageType.FPGA_Local}])
    return sdfg
@fpga_test()
def test_axpy_fpga_array():
    # Scalar float32 and vectorized float64 configs on the array-based FPGA
    # lowering; the returned SDFG is consumed by the fpga_test harness.
    configs = [(0.5, 1, dace.float32), (1.0, 4, dace.float64)]
    return run_test(configs, "fpga_array")
@fpga_test()
def test_axpy_fpga_stream():
    # Same configs as the array variant, but with streaming memory accesses.
    configs = [(0.5, 1, dace.float32), (1.0, 4, dace.float64)]
    return run_test(configs, "fpga_stream")
if __name__ == "__main__":
    # Manual entry point: select a target and run the corresponding tests.
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument("--target", dest="target", default="pure")
    arguments = parser.parse_args()
    if arguments.target == "pure":
        test_pure()
    elif arguments.target == "fpga":
        test_axpy_fpga_array(None)
        test_axpy_fpga_stream(None)
    else:
        raise RuntimeError(f"Unknown target \"{arguments.target}\".")
298,604 | expires at | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetAuthorizationTokenResult',
'AwaitableGetAuthorizationTokenResult',
'get_authorization_token',
'get_authorization_token_output',
]
@pulumi.output_type
class GetAuthorizationTokenResult:
    """
    A collection of values returned by getAuthorizationToken.
    """
    # Generated by the Pulumi tfgen tool — each constructor argument is
    # type-checked and stored via pulumi.set so the @pulumi.output_type
    # machinery can expose it through the @pulumi.getter properties below.
    def __init__(__self__, authorization_token=None, METHOD_NAME=None, id=None, password=None, proxy_endpoint=None, registry_id=None, user_name=None):
        if authorization_token and not isinstance(authorization_token, str):
            raise TypeError("Expected argument 'authorization_token' to be a str")
        pulumi.set(__self__, "authorization_token", authorization_token)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'expires_at' to be a str")
        pulumi.set(__self__, "expires_at", METHOD_NAME)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if password and not isinstance(password, str):
            raise TypeError("Expected argument 'password' to be a str")
        pulumi.set(__self__, "password", password)
        if proxy_endpoint and not isinstance(proxy_endpoint, str):
            raise TypeError("Expected argument 'proxy_endpoint' to be a str")
        pulumi.set(__self__, "proxy_endpoint", proxy_endpoint)
        if registry_id and not isinstance(registry_id, str):
            raise TypeError("Expected argument 'registry_id' to be a str")
        pulumi.set(__self__, "registry_id", registry_id)
        if user_name and not isinstance(user_name, str):
            raise TypeError("Expected argument 'user_name' to be a str")
        pulumi.set(__self__, "user_name", user_name)

    @property
    @pulumi.getter(name="authorizationToken")
    def authorization_token(self) -> str:
        """
        Temporary IAM authentication credentials to access the ECR repository encoded in base64 in the form of `user_name:password`.
        """
        return pulumi.get(self, "authorization_token")

    @property
    @pulumi.getter(name="expiresAt")
    def METHOD_NAME(self) -> str:
        """
        Time in UTC RFC3339 format when the authorization token expires.
        """
        return pulumi.get(self, "expires_at")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> str:
        """
        Password decoded from the authorization token.
        """
        return pulumi.get(self, "password")

    @property
    @pulumi.getter(name="proxyEndpoint")
    def proxy_endpoint(self) -> str:
        """
        Registry URL to use in the docker login command.
        """
        return pulumi.get(self, "proxy_endpoint")

    @property
    @pulumi.getter(name="registryId")
    def registry_id(self) -> Optional[str]:
        # NOTE(review): undocumented in the generated source — presumably the
        # registry ID the token was requested for; confirm against provider docs.
        return pulumi.get(self, "registry_id")

    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> str:
        """
        User name decoded from the authorization token.
        """
        return pulumi.get(self, "user_name")
class AwaitableGetAuthorizationTokenResult(GetAuthorizationTokenResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function (so the
        # result can be awaited) without ever actually yielding.
        if False:
            yield self
        return GetAuthorizationTokenResult(
            authorization_token=self.authorization_token,
            METHOD_NAME=self.METHOD_NAME,
            id=self.id,
            password=self.password,
            proxy_endpoint=self.proxy_endpoint,
            registry_id=self.registry_id,
            user_name=self.user_name)
def get_authorization_token(registry_id: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAuthorizationTokenResult:
    """
    The ECR Authorization Token data source allows the authorization token, proxy endpoint, token expiration date, user name and password to be retrieved for an ECR repository.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    token = aws.ecr.get_authorization_token()
    ```

    :param str registry_id: AWS account ID of the ECR Repository. If not specified the default account is assumed.
    """
    __args__ = dict()
    __args__['registryId'] = registry_id
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Invoke the provider function and map its raw outputs field by field
    # onto the awaitable result wrapper.
    __ret__ = pulumi.runtime.invoke('aws:ecr/getAuthorizationToken:getAuthorizationToken', __args__, opts=opts, typ=GetAuthorizationTokenResult).value

    return AwaitableGetAuthorizationTokenResult(
        authorization_token=pulumi.get(__ret__, 'authorization_token'),
        METHOD_NAME=pulumi.get(__ret__, 'expires_at'),
        id=pulumi.get(__ret__, 'id'),
        password=pulumi.get(__ret__, 'password'),
        proxy_endpoint=pulumi.get(__ret__, 'proxy_endpoint'),
        registry_id=pulumi.get(__ret__, 'registry_id'),
        user_name=pulumi.get(__ret__, 'user_name'))
@_utilities.lift_output_func(get_authorization_token)
def get_authorization_token_output(registry_id: Optional[pulumi.Input[Optional[str]]] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAuthorizationTokenResult]:
    """
    The ECR Authorization Token data source allows the authorization token, proxy endpoint, token expiration date, user name and password to be retrieved for an ECR repository.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    token = aws.ecr.get_authorization_token()
    ```

    :param str registry_id: AWS account ID of the ECR Repository. If not specified the default account is assumed.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke
    # defined above into an Output-returning variant.
    ...
298,605 | frexp | import sys
from collections.abc import Iterable
from typing import Protocol, SupportsFloat, TypeVar, overload
from typing_extensions import SupportsIndex, TypeAlias
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)

# On 3.8+ the math functions also accept anything implementing __index__.
if sys.version_info >= (3, 8):
    _SupportsFloatOrIndex: TypeAlias = SupportsFloat | SupportsIndex
else:
    _SupportsFloatOrIndex: TypeAlias = SupportsFloat

# Module-level mathematical constants.
e: float
pi: float
inf: float
nan: float
tau: float

def acos(__x: _SupportsFloatOrIndex) -> float: ...
def acosh(__x: _SupportsFloatOrIndex) -> float: ...
def asin(__x: _SupportsFloatOrIndex) -> float: ...
def asinh(__x: _SupportsFloatOrIndex) -> float: ...
def atan(__x: _SupportsFloatOrIndex) -> float: ...
def atan2(__y: _SupportsFloatOrIndex, __x: _SupportsFloatOrIndex) -> float: ...
def atanh(__x: _SupportsFloatOrIndex) -> float: ...

if sys.version_info >= (3, 11):
    def cbrt(__x: _SupportsFloatOrIndex) -> float: ...

class _SupportsCeil(Protocol[_T_co]):
    def __ceil__(self) -> _T_co: ...

# ceil/floor return whatever __ceil__/__floor__ returns; plain numbers -> int.
@overload
def ceil(__x: _SupportsCeil[_T]) -> _T: ...
@overload
def ceil(__x: _SupportsFloatOrIndex) -> int: ...

if sys.version_info >= (3, 8):
    def comb(__n: SupportsIndex, __k: SupportsIndex) -> int: ...

def copysign(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
def cos(__x: _SupportsFloatOrIndex) -> float: ...
def cosh(__x: _SupportsFloatOrIndex) -> float: ...
def degrees(__x: _SupportsFloatOrIndex) -> float: ...

if sys.version_info >= (3, 8):
    def dist(__p: Iterable[_SupportsFloatOrIndex], __q: Iterable[_SupportsFloatOrIndex]) -> float: ...

def erf(__x: _SupportsFloatOrIndex) -> float: ...
def erfc(__x: _SupportsFloatOrIndex) -> float: ...
def exp(__x: _SupportsFloatOrIndex) -> float: ...

if sys.version_info >= (3, 11):
    def exp2(__x: _SupportsFloatOrIndex) -> float: ...

def expm1(__x: _SupportsFloatOrIndex) -> float: ...
def fabs(__x: _SupportsFloatOrIndex) -> float: ...

# factorial only accepts __index__ objects from 3.8 on.
if sys.version_info >= (3, 8):
    def factorial(__x: SupportsIndex) -> int: ...

else:
    def factorial(__x: int) -> int: ...

class _SupportsFloor(Protocol[_T_co]):
    def __floor__(self) -> _T_co: ...

@overload
def floor(__x: _SupportsFloor[_T]) -> _T: ...
@overload
def floor(__x: _SupportsFloatOrIndex) -> int: ...
def fmod(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
def METHOD_NAME(__x: _SupportsFloatOrIndex) -> tuple[float, int]: ...
def fsum(__seq: Iterable[_SupportsFloatOrIndex]) -> float: ...
def gamma(__x: _SupportsFloatOrIndex) -> float: ...

# gcd became variadic in 3.9.
if sys.version_info >= (3, 9):
    def gcd(*integers: SupportsIndex) -> int: ...

else:
    def gcd(__x: SupportsIndex, __y: SupportsIndex) -> int: ...

# hypot became variadic in 3.8.
if sys.version_info >= (3, 8):
    def hypot(*coordinates: _SupportsFloatOrIndex) -> float: ...

else:
    def hypot(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...

def isclose(
    a: _SupportsFloatOrIndex,
    b: _SupportsFloatOrIndex,
    *,
    rel_tol: _SupportsFloatOrIndex = 1e-09,
    abs_tol: _SupportsFloatOrIndex = 0.0,
) -> bool: ...
def isinf(__x: _SupportsFloatOrIndex) -> bool: ...
def isfinite(__x: _SupportsFloatOrIndex) -> bool: ...
def isnan(__x: _SupportsFloatOrIndex) -> bool: ...

if sys.version_info >= (3, 8):
    def isqrt(__n: SupportsIndex) -> int: ...

if sys.version_info >= (3, 9):
    def lcm(*integers: SupportsIndex) -> int: ...

def ldexp(__x: _SupportsFloatOrIndex, __i: int) -> float: ...
def lgamma(__x: _SupportsFloatOrIndex) -> float: ...
def log(x: _SupportsFloatOrIndex, base: _SupportsFloatOrIndex = ...) -> float: ...
def log10(__x: _SupportsFloatOrIndex) -> float: ...
def log1p(__x: _SupportsFloatOrIndex) -> float: ...
def log2(__x: _SupportsFloatOrIndex) -> float: ...
def modf(__x: _SupportsFloatOrIndex) -> tuple[float, float]: ...

# nextafter gained the keyword-only `steps` argument in 3.12.
if sys.version_info >= (3, 12):
    def nextafter(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex, *, steps: SupportsIndex | None = None) -> float: ...

elif sys.version_info >= (3, 9):
    def nextafter(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...

if sys.version_info >= (3, 8):
    def perm(__n: SupportsIndex, __k: SupportsIndex | None = None) -> int: ...

def pow(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...

if sys.version_info >= (3, 8):
    @overload
    def prod(__iterable: Iterable[SupportsIndex], *, start: SupportsIndex = 1) -> int: ...  # type: ignore[misc]
    @overload
    def prod(__iterable: Iterable[_SupportsFloatOrIndex], *, start: _SupportsFloatOrIndex = 1) -> float: ...

def radians(__x: _SupportsFloatOrIndex) -> float: ...
def remainder(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
def sin(__x: _SupportsFloatOrIndex) -> float: ...
def sinh(__x: _SupportsFloatOrIndex) -> float: ...

if sys.version_info >= (3, 12):
    def sumprod(__p: Iterable[float], __q: Iterable[float]) -> float: ...

def sqrt(__x: _SupportsFloatOrIndex) -> float: ...
def tan(__x: _SupportsFloatOrIndex) -> float: ...
def tanh(__x: _SupportsFloatOrIndex) -> float: ...

# Is different from `_typeshed.SupportsTrunc`, which is not generic
class _SupportsTrunc(Protocol[_T_co]):
    def __trunc__(self) -> _T_co: ...

def trunc(__x: _SupportsTrunc[_T]) -> _T: ...

if sys.version_info >= (3, 9):
    def ulp(__x: _SupportsFloatOrIndex) -> float: ...
298,606 | set up | import tempfile
import unittest
from pathlib import Path
from iblrig import path_helper
class TestPathHelper(unittest.TestCase):
    def METHOD_NAME(self):
        # Intentionally empty: these tests need no per-test setup.
        pass
    def test_get_remote_server_path(self):
        # The remote server root should always resolve to a pathlib.Path.
        p = path_helper.get_remote_server_path()
        self.assertIsNotNone(p)
        self.assertIsInstance(p, Path)
def test_get_iblrig_local_data_path(self):
# test without specifying subject arg
p = path_helper.get_iblrig_local_data_path()
self.assertIsNotNone(p)
self.assertIsInstance(p, Path)
self.assertTrue(p.parts[-1] == "Subjects")
# test specifying subject=True
p = path_helper.get_iblrig_local_data_path(subjects=True)
self.assertIsNotNone(p)
self.assertIsInstance(p, Path)
self.assertTrue(p.parts[-1] == "Subjects")
# test specifying subject=False
p = path_helper.get_iblrig_local_data_path(subjects=False)
self.assertIsNotNone(p)
self.assertIsInstance(p, Path)
self.assertTrue(p.parts[-1] != "Subjects")
def test_get_remote_server_data_path(self):
# test without specifying subject arg
p = path_helper.get_iblrig_remote_server_data_path()
self.assertIsNotNone(p)
self.assertIsInstance(p, Path)
self.assertTrue(p.parts[-1] == "Subjects")
# test specifying subject=True
p = path_helper.get_iblrig_remote_server_data_path(subjects=True)
self.assertIsNotNone(p)
self.assertIsInstance(p, Path)
self.assertTrue(p.parts[-1] == "Subjects")
# test specifying subject=False
p = path_helper.get_iblrig_remote_server_data_path(subjects=False)
self.assertIsNotNone(p)
self.assertIsInstance(p, Path)
self.assertTrue(p.parts[-1] != "Subjects")
    def test_get_iblrig_path(self):
        # The iblrig install root should resolve to a pathlib.Path.
        p = path_helper.get_iblrig_path()
        self.assertIsNotNone(p)
        self.assertIsInstance(p, Path)
    def test_get_iblrig_params_path(self):
        # The params directory should resolve to a pathlib.Path.
        p = path_helper.get_iblrig_params_path()
        self.assertIsNotNone(p)
        self.assertIsInstance(p, Path)
    def test_get_iblrig_temp_alyx_path(self):
        # The temporary alyx directory should resolve to a pathlib.Path.
        p = path_helper.get_iblrig_temp_alyx_path()
        self.assertIsNotNone(p)
        self.assertIsInstance(p, Path)
    def test_get_commit_hash(self):
        # Ground truth: ask git directly for the current HEAD commit.
        import subprocess
        out = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode().strip()
        # Run it
        ch = path_helper.get_commit_hash(str(path_helper.get_iblrig_path()))
        self.assertTrue(out == ch)
def test_get_previous_session_folders(self):
    """Exercise get_previous_session_folders across local/remote existence combos."""
    test_subject_name = "_iblrig_test_mouse"
    self.local_dir = tempfile.TemporaryDirectory()
    self.remote_dir = tempfile.TemporaryDirectory()

    def create_local_session():
        # Build <local>/Subjects/<subject>/1900-01-01/001 and return it as str.
        local_session_folder = (
            Path(self.local_dir.name) / "Subjects" / test_subject_name / "1900-01-01" / "001"
        )
        local_session_folder.mkdir(parents=True)
        return str(local_session_folder)

    def create_remote_subject():
        # Build <remote>/Subjects and return it as str.
        remote_subject_dir = Path(self.remote_dir.name) / "Subjects"
        remote_subject_dir.mkdir(parents=True)
        return str(remote_subject_dir)

    def assert_values(previous_session_folders):
        # The helper must always return a list; when non-empty, of strings.
        self.assertTrue(isinstance(previous_session_folders, list))
        if previous_session_folders:
            # returned list is not empty and should contain strings
            for session_folder in previous_session_folders:
                self.assertTrue(isinstance(session_folder, str))

    # Test for an existing subject, local does exist and remote does exist
    # Create local session and remote subject temp directories
    test_local_session_folder = create_local_session()
    test_remote_subject_folder = create_remote_subject()
    # Call the function
    test_previous_session_folders = path_helper.get_previous_session_folders(
        test_subject_name,
        test_local_session_folder,
        remote_subject_folder=test_remote_subject_folder,
    )
    assert_values(test_previous_session_folders)
    # Test for an existing subject, local does exist and remote does NOT exist
    self.remote_dir.cleanup()
    # Call the function
    test_previous_session_folders = path_helper.get_previous_session_folders(
        test_subject_name,
        test_local_session_folder,
        remote_subject_folder=test_remote_subject_folder,
    )
    assert_values(test_previous_session_folders)
    # Test for an existing subject, local does NOT exist and remote does exist
    self.local_dir.cleanup()
    # NOTE(review): remote_dir was cleaned up above; create_remote_subject()
    # recreates the tree at the same path (mkdir(parents=True) allows this).
    test_remote_subject_folder = create_remote_subject()
    # Call the function
    test_previous_session_folders = path_helper.get_previous_session_folders(
        test_subject_name,
        test_local_session_folder,
        remote_subject_folder=test_remote_subject_folder,
    )
    assert_values(test_previous_session_folders)
    # Test for an existing subject, local does NOT exist and remote does NOT exist
    # NOTE(review): local_dir is cleaned up a second time here — presumed
    # harmless in this environment; on some Python versions a repeated
    # TemporaryDirectory.cleanup() can raise. TODO confirm.
    self.local_dir.cleanup()
    self.remote_dir.cleanup()
    # Call the function
    test_previous_session_folders = path_helper.get_previous_session_folders(
        test_subject_name,
        test_local_session_folder,
        remote_subject_folder=test_remote_subject_folder,
    )
    assert_values(test_previous_session_folders)
    # Test for a new subject
    # Note: self.local_dir.name is still a valid string after cleanup; the
    # folder itself is never created for this case.
    test_new_subject_name = "_new_iblrig_test_mouse"
    test_new_session_folder = (Path(self.local_dir.name) / "Subjects" / test_new_subject_name / "1970-01-01" / "001")
    test_previous_session_folders = path_helper.get_previous_session_folders(test_new_subject_name,
                                                                             str(test_new_session_folder))
    self.assertTrue(isinstance(test_previous_session_folders, list))
    self.assertTrue(not test_previous_session_folders)  # returned list should be empty
def tearDown(self):
    # No per-test cleanup needed: the temp directories are cleaned up
    # explicitly inside the tests that create them.
    pass
if __name__ == "__main__":
    # exit=False keeps the interpreter alive when run interactively.
    unittest.main(exit=False)
298,607 | serialize related identifiers | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo Serializers."""
from __future__ import absolute_import, print_function
from invenio_pidrelations.contrib.versioning import PIDVersioning
from invenio_pidstore.models import PersistentIdentifier
from zenodo.modules.records.api import ZenodoRecord
def METHOD_NAME(pid):
    """Serialize PID Versioning relations as related_identifiers metadata.

    When ``pid`` is a version child, emit an ``isVersionOf`` link to its
    Concept DOI (if any). When ``pid`` is a concept (parent) PID, emit a
    ``hasVersion`` link for every child version. Returns a list of dicts
    with keys ``scheme``, ``relation`` and ``identifier``.
    """
    pv = PIDVersioning(child=pid)
    related_identifiers = []
    if pv.exists:
        rec = ZenodoRecord.get_record(pid.get_assigned_object())
        # External DOI records don't have Concept DOI
        if 'conceptdoi' in rec:
            ri = {
                'scheme': 'doi',
                'relation': 'isVersionOf',
                'identifier': rec['conceptdoi']
            }
            related_identifiers.append(ri)
        # TODO: We do not serialize previous/next versions to
        # related identifiers because of the semantic-versioning cases
        # (e.g. GitHub releases of minor versions)
        #
        # children = pv.children.all()
        # idx = children.index(pid)
        # left = children[:idx]
        # right = children[idx + 1:]
        # for p in left:
        #     rec = ZenodoRecord.get_record(p.get_assigned_object())
        #     ri = {
        #         'scheme': 'doi',
        #         'relation': 'isNewVersionOf',
        #         'identifier': rec['doi']
        #     }
        #     related_identifiers.append(ri)
        # for p in right:
        #     rec = ZenodoRecord.get_record(p.get_assigned_object())
        #     ri = {
        #         'scheme': 'doi',
        #         'relation': 'isPreviousVersionOf',
        #         'identifier': rec['doi']
        #     }
        #     related_identifiers.append(ri)
    # Second pass: treat the same PID as a parent (concept) PID.
    pv = PIDVersioning(parent=pid)
    if pv.exists:
        for p in pv.children:
            rec = ZenodoRecord.get_record(p.get_assigned_object())
            ri = {
                'scheme': 'doi',
                'relation': 'hasVersion',
                'identifier': rec['doi']
            }
            related_identifiers.append(ri)
    return related_identifiers
def preprocess_related_identifiers(pid, record, result):
    """Preprocess related identifiers for record serialization.

    Resolves the passed pid to the proper `recid` in order to add related
    identifiers from PID relations. Mutates ``result['metadata']`` in place
    and returns ``result``.
    """
    recid_value = record.get('recid')
    # If serializing via the Concept DOI, switch to the concept recid and
    # surface the Concept DOI as the record's DOI in the output.
    if pid.pid_type == 'doi' and pid.pid_value == record.get('conceptdoi'):
        recid_value = record.get('conceptrecid')
        result['metadata']['doi'] = record.get('conceptdoi')
    recid = (pid if pid.pid_value == recid_value else
             PersistentIdentifier.get(pid_type='recid', pid_value=recid_value))
    # Concept recid → parent relation; version recid → child relation.
    if recid.pid_value == record.get('conceptrecid'):
        pv = PIDVersioning(parent=recid)
    else:
        pv = PIDVersioning(child=recid)
    # Serialize PID versioning as related identifiers
    if pv.exists:
        rels = METHOD_NAME(recid)
        if rels:
            result['metadata'].setdefault(
                'related_identifiers', []).extend(rels)
    return result
298,608 | get idtr | # CHIPSEC: Platform Security Assessment Framework
# Copyright (c) 2010-2021, Intel Corporation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; Version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Contact information:
# chipsec@intel.com
#
"""
Access to CPU resources (for each CPU thread): Model Specific Registers (MSR), IDT/GDT
usage:
>>> read_msr( 0x8B )
>>> write_msr( 0x79, 0x12345678 )
>>> get_IDTR( 0 )
>>> get_GDTR( 0 )
>>> dump_Descriptor_Table( 0, DESCRIPTOR_TABLE_CODE_IDTR )
>>> IDT( 0 )
>>> GDT( 0 )
>>> IDT_all()
>>> GDT_all()
"""
from typing import Dict, Tuple, Optional
from chipsec.logger import logger, print_buffer_bytes
# Codes selecting which descriptor-table register the helper should query.
DESCRIPTOR_TABLE_CODE_IDTR = 0
DESCRIPTOR_TABLE_CODE_GDTR = 1
DESCRIPTOR_TABLE_CODE_LDTR = 2
# MTRR memory-type encodings and their human-readable names.
MTRR_MEMTYPE_UC = 0x0
MTRR_MEMTYPE_WC = 0x1
MTRR_MEMTYPE_WT = 0x4
MTRR_MEMTYPE_WP = 0x5
MTRR_MEMTYPE_WB = 0x6
MemType: Dict[int, str] = {
    MTRR_MEMTYPE_UC: 'Uncacheable (UC)',
    MTRR_MEMTYPE_WC: 'Write Combining (WC)',
    MTRR_MEMTYPE_WT: 'Write-through (WT)',
    MTRR_MEMTYPE_WP: 'Write-protected (WP)',
    MTRR_MEMTYPE_WB: 'Writeback (WB)'
}
class Msr:
    """Per-thread access to CPU MSRs and descriptor-table registers (IDT/GDT/LDT)."""

    def __init__(self, cs):
        # `cs` is the chipset object; keep both it and its OS helper handy.
        self.helper = cs.helper
        self.cs = cs

    def get_cpu_thread_count(self) -> int:
        """Return the number of logical CPUs, falling back to MSR Thread_Count."""
        thread_count = self.helper.get_threads_count()
        if thread_count is None or thread_count < 0:
            logger().log_hal("helper.get_threads_count didn't return anything. Reading MSR 0x35 to find out number of logical CPUs (use CPUID Leaf B instead?)")
            thread_count = self.cs.read_register_field("IA32_MSR_CORE_THREAD_COUNT", "Thread_Count")
        if 0 == thread_count:
            # Never report zero threads: at least the current one exists.
            thread_count = 1
        logger().log_hal(f'[cpu] # of logical CPUs: {thread_count:d}')
        return thread_count

    # @TODO: fix
    def get_cpu_core_count(self) -> int:
        """Return the core count read from IA32_MSR_CORE_THREAD_COUNT."""
        core_count = self.cs.read_register_field("IA32_MSR_CORE_THREAD_COUNT", "Core_Count")
        return core_count

    ##########################################################################################################
    #
    # Read/Write CPU MSRs
    #
    ##########################################################################################################

    def read_msr(self, cpu_thread_id: int, msr_addr: int) -> Tuple[int, int]:
        """RDMSR on the given logical CPU; returns (EAX, EDX)."""
        (eax, edx) = self.helper.read_msr(cpu_thread_id, msr_addr)
        logger().log_hal(f'[cpu{cpu_thread_id:d}] RDMSR( 0x{msr_addr:x} ): EAX = 0x{eax:08X}, EDX = 0x{edx:08X}')
        return (eax, edx)

    def write_msr(self, cpu_thread_id: int, msr_addr: int, eax: int, edx: int) -> None:
        """WRMSR on the given logical CPU with the EAX/EDX halves supplied."""
        self.helper.write_msr(cpu_thread_id, msr_addr, eax, edx)
        logger().log_hal(f'[cpu{cpu_thread_id:d}] WRMSR( 0x{msr_addr:x} ): EAX = 0x{eax:08X}, EDX = 0x{edx:08X}')
        return None

    ##########################################################################################################
    #
    # Get CPU Descriptor Table Registers (IDTR, GDTR, LDTR..)
    #
    ##########################################################################################################

    def get_Desc_Table_Register(self, cpu_thread_id: int, code: int) -> Tuple[int, int, int]:
        """Return (limit, base, physical address) for the table selected by `code`.

        Returns (0, 0, 0) when the helper cannot locate the table.
        """
        desc_table = self.helper.get_descriptor_table(cpu_thread_id, code)
        if desc_table is None:
            logger().log_hal(f'[msr] Unable to locate CPU Descriptor Table: Descriptor table code = {code:d}')
            return (0, 0, 0)
        return desc_table

    def METHOD_NAME(self, cpu_thread_id: int) -> Tuple[int, int, int]:
        """Return the IDTR (limit, base, physical address) of a logical CPU."""
        (limit, base, pa) = self.get_Desc_Table_Register(cpu_thread_id, DESCRIPTOR_TABLE_CODE_IDTR)
        logger().log_hal(f'[cpu{cpu_thread_id:d}] IDTR Limit = 0x{limit:04X}, Base = 0x{base:016X}, Physical Address = 0x{pa:016X}')
        return (limit, base, pa)

    def get_GDTR(self, cpu_thread_id: int) -> Tuple[int, int, int]:
        """Return the GDTR (limit, base, physical address) of a logical CPU."""
        (limit, base, pa) = self.get_Desc_Table_Register(cpu_thread_id, DESCRIPTOR_TABLE_CODE_GDTR)
        logger().log_hal(f'[cpu{cpu_thread_id:d}] GDTR Limit = 0x{limit:04X}, Base = 0x{base:016X}, Physical Address = 0x{pa:016X}')
        return (limit, base, pa)

    def get_LDTR(self, cpu_thread_id: int) -> Tuple[int, int, int]:
        """Return the LDTR (limit, base, physical address) of a logical CPU."""
        (limit, base, pa) = self.get_Desc_Table_Register(cpu_thread_id, DESCRIPTOR_TABLE_CODE_LDTR)
        logger().log_hal(f'[cpu{cpu_thread_id:d}] LDTR Limit = 0x{limit:04X}, Base = 0x{base:016X}, Physical Address = 0x{pa:016X}')
        return (limit, base, pa)

    ##########################################################################################################
    #
    # Dump CPU Descriptor Tables (IDT, GDT, LDT..)
    #
    ##########################################################################################################

    def dump_Descriptor_Table(self, cpu_thread_id: int, code: int, num_entries: Optional[int] = None) -> Tuple[int, bytes]:
        """Dump the selected descriptor table; returns (physical address, raw bytes).

        Each table entry is decoded as 16 bytes; at most `num_entries`
        entries are printed (all of them when None).
        """
        (limit, _, pa) = self.helper.get_descriptor_table(cpu_thread_id, code)
        # Read the whole table from physical memory (limit is inclusive).
        dt = self.helper.read_phys_mem(pa, limit + 1)
        total_num = len(dt) // 16
        if (num_entries is None) or (total_num < num_entries):
            num_entries = total_num
        logger().log(f'[cpu{cpu_thread_id:d}] Physical Address: 0x{pa:016X}')
        logger().log(f'[cpu{cpu_thread_id:d}] # of entries    : {total_num:d}')
        logger().log(f'[cpu{cpu_thread_id:d}] Contents ({num_entries:d} entries):')
        print_buffer_bytes(dt)
        logger().log('--------------------------------------')
        logger().log('#    segment:offset          attributes')
        logger().log('--------------------------------------')
        for i in range(0, num_entries):
            # Reassemble the 64-bit offset, 16-bit segment selector and
            # 16-bit attributes from the 16-byte descriptor entry.
            offset = (dt[i * 16 + 11] << 56) | (dt[i * 16 + 10] << 48) | (dt[i * 16 + 9] << 40) | (dt[i * 16 + 8] << 32) | (dt[i * 16 + 7] << 24) | (dt[i * 16 + 6] << 16) | (dt[i * 16 + 1] << 8) | dt[i * 16 + 0]
            segsel = (dt[i * 16 + 3] << 8) | dt[i * 16 + 2]
            attr = (dt[i * 16 + 5] << 8) | dt[i * 16 + 4]
            logger().log(f'{i:03d} {segsel:04X}:{offset:016X} 0x{attr:04X}')
        return (pa, dt)

    def IDT(self, cpu_thread_id: int, num_entries: Optional[int] = None) -> Tuple[int, bytes]:
        """Dump the IDT of one logical CPU."""
        logger().log_hal(f'[cpu{cpu_thread_id:d}] IDT:')
        return self.dump_Descriptor_Table(cpu_thread_id, DESCRIPTOR_TABLE_CODE_IDTR, num_entries)

    def GDT(self, cpu_thread_id: int, num_entries: Optional[int] = None) -> Tuple[int, bytes]:
        """Dump the GDT of one logical CPU."""
        logger().log_hal(f'[cpu{cpu_thread_id:d}] GDT:')
        return self.dump_Descriptor_Table(cpu_thread_id, DESCRIPTOR_TABLE_CODE_GDTR, num_entries)

    def IDT_all(self, num_entries: Optional[int] = None) -> None:
        """Dump the IDT of every logical CPU."""
        for tid in range(self.get_cpu_thread_count()):
            self.IDT(tid, num_entries)

    def GDT_all(self, num_entries: Optional[int] = None) -> None:
        """Dump the GDT of every logical CPU."""
        for tid in range(self.get_cpu_thread_count()):
            self.GDT(tid, num_entries)
298,609 | exercise linear normal equations | from __future__ import absolute_import, division, print_function
import libtbx.load_env
from scitbx.array_family import flex
from scitbx import sparse
from scitbx.lstbx import normal_eqns, normal_eqns_solving
from libtbx.test_utils import approx_equal, Exception_expected
from scitbx.lstbx.tests import test_problems
from six.moves import range
def METHOD_NAME():
    """Check linear_ls: per-equation and whole-design-matrix builds agree."""
    # Each tuple: (right-hand side b, design-matrix row a, weight w).
    py_eqs = [ ( 1, (-1, 0, 0), 1),
               ( 2, ( 2, -1, 0), 3),
               (-1, ( 0, 2, 1), 2),
               (-2, ( 0, 1, 0), -2),
               ]
    # Build the normal equations one equation at a time ...
    eqs_0 = normal_eqns.linear_ls(3)
    for b, a, w in py_eqs:
        eqs_0.add_equation(right_hand_side=b,
                           design_matrix_row=flex.double(a),
                           weight=w)
    # ... and in one shot from a sparse design matrix.
    eqs_1 = normal_eqns.linear_ls(3)
    b = flex.double()
    w = flex.double()
    a = sparse.matrix(len(py_eqs), 3)
    for i, (b_, a_, w_) in enumerate(py_eqs):
        b.append(b_)
        w.append(w_)
        for j in range(3):
            if a_[j]: a[i, j] = a_[j]
    eqs_1.add_equations(right_hand_side=b, design_matrix=a, weights=w)
    # Both builds must produce identical normal matrix and RHS ...
    assert approx_equal(
        eqs_0.normal_matrix_packed_u(), eqs_1.normal_matrix_packed_u(), eps=1e-15)
    assert approx_equal(
        eqs_0.right_hand_side(), eqs_1.right_hand_side(), eps=1e-15)
    # ... matching the hand-computed reference values.
    assert approx_equal(
        list(eqs_0.normal_matrix_packed_u()), [ 13, -6, 0, 9, 4, 2 ], eps=1e-15)
    assert approx_equal(
        list(eqs_0.right_hand_side()), [ 11, -6, -2 ], eps=1e-15)
# Implementations under test: the level-2 BLAS variant is always available;
# the level-3 BLAS variant is added only when fast_linalg imports and is
# initialised.
non_linear_ls_with_separable_scale_factor__impls = (
    normal_eqns.non_linear_ls_with_separable_scale_factor__level_2_blas_impl,
)
try:
    from fast_linalg import env
    if env.initialised:
        non_linear_ls_with_separable_scale_factor__impls += (
            normal_eqns.non_linear_ls_with_separable_scale_factor__level_3_blas_impl,
        )
except ImportError:
    print('Skipping fast_linalg checks')
def exercise_non_linear_ls_with_separable_scale_factor():
    """Polynomial-fit problem: check scale factor, objective and step equations."""
    for impl in non_linear_ls_with_separable_scale_factor__impls:
        test = test_problems.polynomial_fit(impl)(normalised=False)
        test.build_up()
        assert test.n_equations == test.n_data;
        # Reference values computed in tst_normal_equations.nb
        eps = 5e-14
        assert approx_equal(test.optimal_scale_factor(), 0.6148971786833856, eps)
        assert approx_equal(test.objective(), 0.039642707534326034, eps)
        assert approx_equal(test.chi_sq(), 0.011326487866950296, eps)
        assert not test.step_equations().solved
        # Before solving, the Cholesky factor and solution must be unavailable.
        try:
            test.step_equations().cholesky_factor_packed_u()
            raise Exception_expected
        except RuntimeError:
            pass
        try:
            test.step_equations().solution()
            raise Exception_expected
        except RuntimeError:
            pass
        assert approx_equal(
            list(test.step_equations().normal_matrix_packed_u()),
            [ 0.371944193675858, 0.39066546997866547 , 0.10797294655500618,
              0.41859250354804045, 0.08077629438075473,
              0.19767268057900367 ],
            eps)
        assert approx_equal(
            list(test.step_equations().right_hand_side()),
            [ 0.12149917297914861, 0.13803759252793774, -0.025190641142579157 ],
            eps)
        test.step_equations().solve()
        assert test.step_equations().solved
        # After solving, the normal matrix and RHS are no longer accessible
        # (the factorisation is done in place).
        try:
            test.step_equations().normal_matrix_packed_u()
            raise Exception_expected
        except RuntimeError:
            pass
        try:
            test.step_equations().right_hand_side()
            raise Exception_expected
        except RuntimeError:
            pass
        assert approx_equal(
            list(test.step_equations().cholesky_factor_packed_u()),
            [ 0.6098722765266986, 0.6405693208478925 , 0.1770418999366983 ,
              0.09090351333425013, -0.3589664912436558 ,
              0.19357661121640218 ],
            eps)
        assert approx_equal(
            list(test.step_equations().solution()),
            [ 1.2878697604109028, -0.7727798877778043, -0.5151113342942297 ],
            eps=1e-12)
        # The normalised variant rescales the objective by sum(w * yo^2).
        test_bis = test_problems.polynomial_fit(impl)(normalised=True)
        test_bis.build_up()
        assert approx_equal(test_bis.objective(),
                            test.objective()/test.sum_w_yo_sq(),
                            eps=1e-15)
        assert approx_equal(test_bis.chi_sq(), test.chi_sq(), eps=1e-15)
def exercise_non_linear_ls_with_separable_scale_factor_plus_penalty():
    """Same problem with one penalty equation added (n_equations = n_data + 1)."""
    for impl in non_linear_ls_with_separable_scale_factor__impls:
        test = test_problems.polynomial_fit_with_penalty(impl)(normalised=False)
        test.build_up()
        assert test.n_equations == test.n_data + 1
        eps = 5e-14
        # reference values from tst_normal_equations.nb again
        assert approx_equal(test.optimal_scale_factor(), 0.6148971786833856, eps)
        # The reduced problem must share objective and step equations exactly.
        redu = test.reduced_problem()
        assert test.objective() == redu.objective()
        assert test.step_equations().right_hand_side()\
               .all_eq(redu.step_equations().right_hand_side())
        assert test.step_equations().normal_matrix_packed_u()\
               .all_eq(redu.step_equations().normal_matrix_packed_u())
        assert approx_equal(test.objective(), 1.3196427075343262, eps)
        assert approx_equal(test.chi_sq(), 0.32991067688358156, eps)
        assert approx_equal(
            test.step_equations().right_hand_side(),
            (1.7214991729791487, -1.4619624074720623, 1.5748093588574208),
            eps)
        assert approx_equal(
            test.step_equations().normal_matrix_packed_u(),
            (1.371944193675858, -0.6093345300213344, 1.107972946555006,
             1.4185925035480405, -0.9192237056192452,
             1.1976726805790037),
            eps)
        test_bis = test_problems.polynomial_fit_with_penalty(impl)(normalised=True)
        test_bis.build_up()
        assert approx_equal(test_bis.chi_sq(), test.chi_sq(), eps=1e-15)
        # Rebuilding must not accumulate extra equations.
        n_equations = test.n_equations
        test.build_up()
        assert test.n_equations == n_equations
def exercise_levenberg_marquardt(non_linear_ls, plot=False):
    """Minimise `non_linear_ls` with Levenberg-Marquardt and check convergence.

    When `plot` is True, also write a Mathematica notebook 'plot.nb'
    charting the gradient-norm and mu histories of the iterations.
    """
    non_linear_ls.restart()
    iterations = normal_eqns_solving.levenberg_marquardt_iterations(
        non_linear_ls,
        track_all=True,
        gradient_threshold=1e-8,
        step_threshold=1e-8,
        tau=1e-4,
        n_max_iterations=200)
    assert non_linear_ls.n_equations == non_linear_ls.n_data
    # The minimiser must land on the known minimum of the test problem.
    assert approx_equal(non_linear_ls.x, non_linear_ls.arg_min, eps=5e-4)
    print("L-M: %i iterations" % iterations.n_iterations)
    if plot:
        # Context manager guarantees the file is closed even if a print fails
        # (the original open()/close() pair leaked the handle on exception).
        with open('plot.nb', 'w') as f:
            print("g=%s;" % iterations.gradient_norm_history.mathematica_form(), file=f)
            print("\\[Mu]=%s;" % iterations.mu_history.mathematica_form(), file=f)
            print("ListLogPlot[{g,\\[Mu]},Joined->True]", file=f)
def run():
    """Run every exercise; pass --plot on the command line to write plot.nb."""
    import sys
    plot = '--plot' in sys.argv[1:]
    t = test_problems.exponential_fit()
    exercise_levenberg_marquardt(t, plot)
    METHOD_NAME()
    exercise_non_linear_ls_with_separable_scale_factor()
    exercise_non_linear_ls_with_separable_scale_factor_plus_penalty()
    print('OK')

if __name__ == '__main__':
    run()
298,610 | test onehot categories | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import_from
from sklearn.preprocessing import OneHotEncoder as SkOneHotEncoder
from cuml.testing.utils import (
stress_param,
generate_inputs_from_categories,
assert_inverse_equal,
from_df_to_numpy,
)
from cuml.dask.preprocessing import OneHotEncoder
import dask.array as da
from cuml.internals.safe_imports import cpu_only_import
from cudf import DataFrame, Series
import pytest
from cuml.internals.safe_imports import gpu_only_import
# Lazy import shims: GPU-only packages resolve through gpu_only_import so the
# module can still be imported (and tests skipped) on CPU-only machines.
dask_cudf = gpu_only_import("dask_cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
assert_frame_equal = cpu_only_import_from(
    "pandas.testing", "assert_frame_equal"
)
@pytest.mark.mg
def test_onehot_vs_skonehot(client):
    """Dask OneHotEncoder matches scikit-learn on a small mixed frame."""
    X = DataFrame({"gender": ["Male", "Female", "Female"], "int": [1, 3, 2]})
    skX = from_df_to_numpy(X)
    X = dask_cudf.from_cudf(X, npartitions=2)
    enc = OneHotEncoder(sparse=False)
    skohe = SkOneHotEncoder(sparse=False)
    ohe = enc.fit_transform(X)
    ref = skohe.fit_transform(skX)
    cp.testing.assert_array_equal(ohe.compute(), ref)
@pytest.mark.mg
@pytest.mark.parametrize(
    "drop", [None, "first", {"g": Series("F"), "i": Series(3)}]
)
def test_onehot_inverse_transform(client, drop):
    """inverse_transform round-trips the original frame for every drop mode."""
    df = DataFrame({"g": ["M", "F", "F"], "i": [1, 3, 2]})
    X = dask_cudf.from_cudf(df, npartitions=2)
    enc = OneHotEncoder(drop=drop)
    ohe = enc.fit_transform(X)
    inv = enc.inverse_transform(ohe)
    assert_frame_equal(
        inv.compute().to_pandas().reset_index(drop=True), df.to_pandas()
    )
@pytest.mark.mg
def METHOD_NAME(client):
    """An explicit `categories` frame defines the encoded column space."""
    X = DataFrame({"chars": ["a", "b"], "int": [0, 2]})
    X = dask_cudf.from_cudf(X, npartitions=2)
    # Three categories per column, so each row encodes to 6 columns.
    cats = DataFrame({"chars": ["a", "b", "c"], "int": [0, 1, 2]})
    enc = OneHotEncoder(categories=cats, sparse=False)
    ref = cp.array(
        [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]]
    )
    res = enc.fit_transform(X)
    cp.testing.assert_array_equal(res.compute(), ref)
@pytest.mark.mg
def test_onehot_fit_handle_unknown(client):
    """fit raises on categories absent from `categories` only when handle_unknown='error'."""
    X = DataFrame({"chars": ["a", "b"], "int": [0, 2]})
    Y = DataFrame({"chars": ["c", "b"], "int": [0, 2]})
    X = dask_cudf.from_cudf(X, npartitions=2)
    # "a" is not in Y's categories -> error mode must raise at fit time.
    enc = OneHotEncoder(handle_unknown="error", categories=Y)
    with pytest.raises(KeyError):
        enc.fit(X)
    # ignore mode accepts the same input silently.
    enc = OneHotEncoder(handle_unknown="ignore", categories=Y)
    enc.fit(X)
@pytest.mark.mg
def test_onehot_transform_handle_unknown(client):
    """transform on unseen categories raises or emits all-zero rows per mode."""
    X = DataFrame({"chars": ["a", "b"], "int": [0, 2]})
    Y = DataFrame({"chars": ["c", "b"], "int": [0, 2]})
    X = dask_cudf.from_cudf(X, npartitions=2)
    Y = dask_cudf.from_cudf(Y, npartitions=2)
    # error mode: the unseen "c" must raise (only on compute, lazily).
    enc = OneHotEncoder(handle_unknown="error", sparse=False)
    enc = enc.fit(X)
    with pytest.raises(KeyError):
        enc.transform(Y).compute()
    # ignore mode: the unseen category encodes to all zeros.
    enc = OneHotEncoder(handle_unknown="ignore", sparse=False)
    enc = enc.fit(X)
    ohe = enc.transform(Y)
    ref = cp.array([[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]])
    cp.testing.assert_array_equal(ohe.compute(), ref)
@pytest.mark.mg
def test_onehot_inverse_transform_handle_unknown(client):
    """In ignore mode, an all-zero row inverse-transforms to None."""
    X = DataFrame({"chars": ["a", "b"], "int": [0, 2]})
    X = dask_cudf.from_cudf(X, npartitions=2)
    # First row has no active "chars" column -> decodes to None.
    Y_ohe = cp.array([[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]])
    Y_ohe = da.from_array(Y_ohe)
    enc = OneHotEncoder(handle_unknown="ignore")
    enc = enc.fit(X)
    df = enc.inverse_transform(Y_ohe)
    ref = DataFrame({"chars": [None, "b"], "int": [0, 2]})
    assert_frame_equal(df.compute().to_pandas(), ref.to_pandas())
@pytest.mark.mg
@pytest.mark.parametrize("drop", [None, "first"])
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
@pytest.mark.parametrize("sparse", [True, False], ids=["sparse", "dense"])
@pytest.mark.parametrize("n_samples", [10, 1000, stress_param(50000)])
def test_onehot_random_inputs(client, drop, as_array, sparse, n_samples):
    """Randomized round-trip against scikit-learn across input/output formats."""
    X, ary = generate_inputs_from_categories(
        n_samples=n_samples, as_array=as_array
    )
    # Wrap the generated data in the matching dask collection type.
    if as_array:
        dX = da.from_array(X)
    else:
        dX = dask_cudf.from_cudf(X, npartitions=1)
    enc = OneHotEncoder(sparse=sparse, drop=drop, categories="auto")
    sk_enc = SkOneHotEncoder(sparse=sparse, drop=drop, categories="auto")
    ohe = enc.fit_transform(dX)
    ref = sk_enc.fit_transform(ary)
    if sparse:
        # Compare densified sparse outputs.
        cp.testing.assert_array_equal(ohe.compute().toarray(), ref.toarray())
    else:
        cp.testing.assert_array_equal(ohe.compute(), ref)
    # And the inverse transform must reproduce the input.
    inv_ohe = enc.inverse_transform(ohe)
    assert_inverse_equal(inv_ohe.compute(), dX.compute())
@pytest.mark.mg
def test_onehot_drop_idx_first(client):
    """drop='first' matches scikit-learn and still inverse-transforms cleanly."""
    X_ary = [["c", 2, "a"], ["b", 2, "b"]]
    X = DataFrame({"chars": ["c", "b"], "int": [2, 2], "letters": ["a", "b"]})
    ddf = dask_cudf.from_cudf(X, npartitions=2)
    enc = OneHotEncoder(sparse=False, drop="first")
    sk_enc = SkOneHotEncoder(sparse=False, drop="first")
    ohe = enc.fit_transform(ddf)
    ref = sk_enc.fit_transform(X_ary)
    cp.testing.assert_array_equal(ohe.compute(), ref)
    inv = enc.inverse_transform(ohe)
    assert_frame_equal(
        inv.compute().to_pandas().reset_index(drop=True), X.to_pandas()
    )
@pytest.mark.mg
def test_onehot_drop_one_of_each(client):
    """drop given as a per-column dict matches scikit-learn's list form."""
    X_ary = [["c", 2, "a"], ["b", 2, "b"]]
    X = DataFrame({"chars": ["c", "b"], "int": [2, 2], "letters": ["a", "b"]})
    ddf = dask_cudf.from_cudf(X, npartitions=2)
    # cuml takes a column->value mapping; sklearn takes a positional list.
    drop = dict({"chars": "b", "int": 2, "letters": "b"})
    enc = OneHotEncoder(sparse=False, drop=drop)
    sk_enc = SkOneHotEncoder(sparse=False, drop=["b", 2, "b"])
    ohe = enc.fit_transform(ddf)
    ref = sk_enc.fit_transform(X_ary)
    cp.testing.assert_array_equal(ohe.compute(), ref)
    inv = enc.inverse_transform(ohe)
    assert_frame_equal(
        inv.compute().to_pandas().reset_index(drop=True), X.to_pandas()
    )
@pytest.mark.mg
@pytest.mark.parametrize(
    "drop, pattern",
    [
        # Too few columns in the drop mapping.
        [dict({"chars": "b"}), "`drop` should have as many columns"],
        # More than one value to drop for a single column.
        [
            dict({"chars": "b", "int": [2, 0]}),
            "Trying to drop multiple values",
        ],
        # Value that is not among the fitted categories.
        [
            dict({"chars": "b", "int": 3}),
            "Some categories [a-zA-Z, ]* were not found",
        ],
        # Wrong container type altogether.
        [
            DataFrame({"chars": "b", "int": 3}),
            "Wrong input for parameter `drop`.",
        ],
    ],
)
def test_onehot_drop_exceptions(client, drop, pattern):
    """Malformed `drop` arguments raise ValueError with the documented message."""
    X = DataFrame({"chars": ["c", "b", "d"], "int": [2, 1, 0]})
    X = dask_cudf.from_cudf(X, npartitions=2)
    with pytest.raises(ValueError, match=pattern):
        OneHotEncoder(sparse=False, drop=drop).fit(X)
@pytest.mark.mg
def test_onehot_get_categories(client):
    """categories_ holds sorted per-column category arrays after fit."""
    X = DataFrame({"chars": ["c", "b", "d"], "ints": [2, 1, 0]})
    X = dask_cudf.from_cudf(X, npartitions=2)
    ref = [np.array(["b", "c", "d"]), np.array([0, 1, 2])]
    enc = OneHotEncoder().fit(X)
    cats = enc.categories_
    for i in range(len(ref)):
        np.testing.assert_array_equal(ref[i], cats[i].to_numpy())
298,611 | test reconstructable job namespace | import warnings
from datetime import datetime
import pytest
from dagster import (
DagsterInvariantViolationError,
Field,
StringSource,
execute_job,
graph,
job,
op,
reconstructable,
static_partitioned_config,
)
from dagster._core.storage.tags import PARTITION_NAME_TAG
from dagster._core.test_utils import environ, instance_for_test
def define_the_job():
    """Build a job invoking `my_op` ten times (module scope for reconstructable)."""
    @op
    def my_op():
        return 5

    @job
    def call_the_op():
        for _ in range(10):
            my_op()

    return call_the_op
def test_simple_job_no_warnings():
    """Defining and executing a simple job must not emit any warning."""
    # will fail if any warning is emitted
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        job = define_the_job()
        assert job.execute_in_process().success
def test_job_execution_multiprocess_config():
    """The job runs under the multiprocess executor with bounded concurrency."""
    with instance_for_test() as instance:
        with execute_job(
            reconstructable(define_the_job),
            instance=instance,
            run_config={"execution": {"config": {"multiprocess": {"max_concurrent": 4}}}},
        ) as result:
            assert result.success
            assert result.output_for_node("my_op") == 5
# Module-level sink the in-process ops append to, so the test below can
# observe that every op invocation actually ran.
results_lst = []
def define_in_process_job():
    """Build a job whose op records each invocation in `results_lst`."""
    @op
    def my_op():
        results_lst.append("entered")

    @job
    def call_the_op():
        for _ in range(10):
            my_op()

    return call_the_op
def test_switch_to_in_process_execution():
    """Forcing the in_process executor still runs all ten op invocations."""
    result = define_in_process_job().execute_in_process(
        run_config={"execution": {"config": {"in_process": {}}}},
    )
    assert result.success
    assert len(results_lst) == 10
# An empty graph turned into a job at module import time. Because the job is
# NOT built inside a module-scope function, it is deliberately not
# reconstructable (exercised by the test below).
@graph
def basic_graph():
    pass


basic_job = basic_graph.to_job()
def test_non_reconstructable_job_error():
    """reconstructable() rejects a job not built inside a module-scope function."""
    with pytest.raises(
        DagsterInvariantViolationError,
        match="you must wrap the ``to_job`` call in a function at module scope",
    ):
        reconstructable(basic_job)
# Job whose op is defined inside the job body, to exercise namespaced
# reconstruction in the test below.
@job
def my_namespace_job():
    @op
    def inner_op():
        pass

    inner_op()
def METHOD_NAME():
    """A job with body-local ops still executes via reconstructable()."""
    with instance_for_test() as instance:
        result = execute_job(reconstructable(my_namespace_job), instance=instance)
        assert result.success
def test_job_top_level_input():
    """A job-level input is routed to the op through run_config['inputs']."""
    @job
    def my_job_with_input(x):
        @op
        def my_op(y):
            return y

        my_op(x)

    result = my_job_with_input.execute_in_process(run_config={"inputs": {"x": {"value": 2}}})
    assert result.success
    assert result.output_for_node("my_op") == 2
def test_job_post_process_config():
    """Env-var config sources resolve at execution time, not at job definition."""
    @op(config_schema={"foo": Field(StringSource)})
    def the_op(context):
        return context.op_config["foo"]

    @graph
    def basic():
        the_op()

    with environ({"SOME_ENV_VAR": None}):
        # Ensure that the env var not existing will not throw an error, since resolution happens in post-processing.
        the_job = basic.to_job(
            config={"ops": {"the_op": {"config": {"foo": {"env": "SOME_ENV_VAR"}}}}}
        )
    with environ({"SOME_ENV_VAR": "blah"}):
        assert the_job.execute_in_process().success
def test_job_run_request():
    """run_request_for_partition yields the partition's run_config and tags."""
    def partition_fn(partition_key: str):
        # Config that the partitioned config produces for a given key.
        return {"ops": {"my_op": {"config": {"partition": partition_key}}}}

    @static_partitioned_config(partition_keys=["a", "b", "c", "d"])
    def my_partitioned_config(partition_key: str):
        return partition_fn(partition_key)

    @op
    def my_op():
        pass

    @job(config=my_partitioned_config)
    def my_job():
        my_op()

    for partition_key in ["a", "b", "c", "d"]:
        run_request = my_job.run_request_for_partition(partition_key=partition_key, run_key=None)
        assert run_request.run_config == partition_fn(partition_key)
        assert run_request.tags
        assert run_request.tags.get(PARTITION_NAME_TAG) == partition_key
        # Caller-supplied tags are merged with the partition tag.
        run_request_with_tags = my_job.run_request_for_partition(
            partition_key=partition_key, run_key=None, tags={"foo": "bar"}
        )
        assert run_request_with_tags.run_config == partition_fn(partition_key)
        assert run_request_with_tags.tags
        assert run_request_with_tags.tags.get(PARTITION_NAME_TAG) == partition_key
        assert run_request_with_tags.tags.get("foo") == "bar"

    # An explicit run_config overrides the partitioned config entirely.
    assert my_job.run_request_for_partition(partition_key="a", run_config={"a": 5}).run_config == {
        "a": 5
    }
# Datetime is not serializable
# These are module-level so the out-of-process test below can reconstruct
# the job; the datetime input must survive without being serialized.
@op
def op_expects_date(the_date: datetime) -> str:
    return the_date.strftime("%m/%d/%Y")


@job(input_values={"the_date": datetime.now()})
def pass_from_job(the_date):
    op_expects_date(the_date)
def test_job_input_values_out_of_process():
    # Test job execution with non-serializable input type out-of-process
    assert pass_from_job.execute_in_process().success
    with instance_for_test() as instance:
        result = execute_job(reconstructable(pass_from_job), instance=instance)
        assert result.success
def test_subset_job_with_config():
    """Op-selection subsets still accept run_config, with or without job config."""
    @op
    def echo(x: int):
        return x

    @job
    def no_config():
        echo(echo.alias("emit")())

    result = no_config.execute_in_process(run_config={"ops": {"emit": {"inputs": {"x": 1}}}})
    assert result.success

    # Subsetting drops the upstream alias; the input moves to the kept op.
    subset_no_config = no_config.get_subset(op_selection=["echo"])
    result = subset_no_config.execute_in_process(run_config={"ops": {"echo": {"inputs": {"x": 1}}}})
    assert result.success

    @job(config={"ops": {"emit": {"inputs": {"x": 1}}}})
    def with_config():
        echo(echo.alias("emit")())

    result = with_config.execute_in_process()
    assert result.success

    # The baked-in config targets "emit", which the subset removes, so the
    # subset must take fresh run_config for "echo".
    subset_with_config = with_config.get_subset(op_selection=["echo"])
    result = subset_with_config.execute_in_process(
        run_config={"ops": {"echo": {"inputs": {"x": 1}}}}
    )
    assert result.success
def test_coerce_resource_job_decorator() -> None:
    """A bare (non-ResourceDefinition) object is coerced into a resource."""
    executed = {}

    class BareResourceObject:
        pass

    @op(required_resource_keys={"bare_resource"})
    def an_op(context) -> None:
        assert context.resources.bare_resource
        executed["yes"] = True

    @job(resource_defs={"bare_resource": BareResourceObject()})
    def a_job() -> None:
        an_op()

    assert a_job.execute_in_process().success
    assert executed["yes"]
def test_coerce_resource_graph_to_job() -> None:
    """Resource coercion also works when resources are passed to graph.to_job."""
    executed = {}

    class BareResourceObject:
        pass

    @op(required_resource_keys={"bare_resource"})
    def an_op(context) -> None:
        assert context.resources.bare_resource
        executed["yes"] = True

    @graph
    def a_graph() -> None:
        an_op()

    a_job = a_graph.to_job(resource_defs={"bare_resource": BareResourceObject()})
    assert a_job.execute_in_process().success
    assert executed["yes"]
298,612 | repr | """
Finitely and Freely Generated Lie Conformal Algebras.
AUTHORS:
- Reimundo Heluani (2019-08-09): Initial implementation.
"""
#******************************************************************************
# Copyright (C) 2019 Reimundo Heluani <heluani@potuz.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.categories.lie_conformal_algebras import LieConformalAlgebras
from .freely_generated_lie_conformal_algebra import \
FreelyGeneratedLieConformalAlgebra
class FinitelyFreelyGeneratedLCA(FreelyGeneratedLieConformalAlgebra):
    """
    Abstract base class for finitely generated Lie conformal
    algebras.

    This class provides minimal functionality, simply sets the
    number of generators.
    """
    def __init__(self, R, index_set=None, central_elements=None, category=None,
                 element_class=None, prefix=None, names=None, latex_names=None,
                 **kwds):
        """
        Initialize self.

        INPUT:

        - ``R`` -- the base ring
        - ``index_set`` -- a finite set indexing the generators (required)
        - ``central_elements`` -- central generators, passed through to the base class
        - ``category`` -- an optional (sub)category of finitely generated
          Lie conformal algebras
        - ``names``, ``latex_names`` -- printing names for the generators

        TESTS::

            sage: V = lie_conformal_algebras.Virasoro(QQ)
            sage: TestSuite(V).run()
        """
        default_category = LieConformalAlgebras(R).FinitelyGenerated()
        try:
            category = default_category.or_subcategory(category)
        except ValueError:
            # `category` is not below the plain finitely-generated category;
            # retry against its super-algebra analogue.
            category = default_category.Super().or_subcategory(category)
        from sage.categories.sets_cat import Sets
        if index_set not in Sets().Finite():
            raise TypeError("index_set must be a finite set")
        super().__init__(R,
                         index_set=index_set, central_elements=central_elements,
                         category=category, element_class=element_class,
                         prefix=prefix, **kwds)
        # _generators is populated by the base-class __init__ above.
        self._ngens = len(self._generators)
        self._names = names
        self._latex_names = latex_names

    def METHOD_NAME(self):
        """
        The name of this Lie conformal algebra.

        .. NOTE::

            Judging by its contract this is the ``_repr_`` printing hook —
            confirm against the full file.

        EXAMPLES::

            sage: bosondict = {('a','a'):{1:{('K',0):1}}}
            sage: R = LieConformalAlgebra(QQ,bosondict,names=('a',),central_elements=('K',))
            sage: R
            Lie conformal algebra with generators (a, K) over Rational Field
        """
        if self._ngens == 1:
            return "Lie conformal algebra generated by {0} over {1}".format(
                self.gen(0), self.base_ring())
        return "Lie conformal algebra with generators {0} over {1}".format(
            self.gens(), self.base_ring())

    def _an_element_(self):
        """
        An element of this Lie conformal algebra.

        Returns the sum of all generators (including central ones).

        EXAMPLES::

            sage: R = lie_conformal_algebras.NeveuSchwarz(QQ); R.an_element()
            L + G + C
        """
        return self.sum(self.gens())

    def ngens(self):
        """
        The number of generators of this Lie conformal algebra.

        EXAMPLES::

            sage: Vir = lie_conformal_algebras.Virasoro(QQ); Vir.ngens()
            2
            sage: V = lie_conformal_algebras.Affine(QQ, 'A1'); V.ngens()
            4
        """
        return self._ngens

    @cached_method
    def gens(self):
        """
        The generators for this Lie conformal algebra.

        OUTPUT:

        This method returns a tuple with the (finite) generators
        of this Lie conformal algebra.

        EXAMPLES::

            sage: Vir = lie_conformal_algebras.Virasoro(QQ);
            sage: Vir.gens()
            (L, C)

        .. SEEALSO::

            :meth:`lie_conformal_algebra_generators<\
            FreelyGeneratedLieConformalAlgebra.\
            lie_conformal_algebra_generators>`
        """
        return self.lie_conformal_algebra_generators()

    @cached_method
    def central_elements(self):
        """
        The central elements of this Lie conformal algebra.

        EXAMPLES::

            sage: R = lie_conformal_algebras.NeveuSchwarz(QQ); R.central_elements()
            (C,)
        """
        # Explicit base-class call (not super()) mirrors the original dispatch.
        return tuple(FreelyGeneratedLieConformalAlgebra.central_elements(self))
298,613 | test abs grad | import numpy as np
import pytest
import pytensor
from pytensor.gradient import GradientError
from pytensor.tensor.basic import cast
from pytensor.tensor.math import complex as at_complex
from pytensor.tensor.math import complex_from_polar, imag, real
from pytensor.tensor.type import cvector, dvector, fmatrix, fvector, imatrix, zvector
from tests import unittest_tools as utt
class TestRealImag:
    """Tests for pytensor's real/imag/complex ops on complex tensors."""

    def test_basic(self):
        # real()/imag() on a complex vector reproduce numpy's .real/.imag.
        x = zvector()
        rng = np.random.default_rng(23)
        xval = np.asarray(
            [complex(rng.standard_normal(), rng.standard_normal()) for i in range(10)]
        )
        assert np.all(xval.real == pytensor.function([x], real(x))(xval))
        assert np.all(xval.imag == pytensor.function([x], imag(x))(xval))

    def test_on_real_input(self):
        # NOTE(review): the np.all(...) results below are never asserted —
        # this test cannot fail on a wrong value; likely missing `assert`.
        x = dvector()
        rng = np.random.default_rng(23)
        xval = rng.standard_normal(10)
        np.all(0 == pytensor.function([x], imag(x))(xval))
        np.all(xval == pytensor.function([x], real(x))(xval))
        x = imatrix()
        xval = np.asarray(rng.standard_normal((3, 3)) * 100, dtype="int32")
        np.all(0 == pytensor.function([x], imag(x))(xval))
        np.all(xval == pytensor.function([x], real(x))(xval))

    def test_cast(self):
        # Casting complex to int must be rejected.
        x = zvector()
        with pytest.raises(TypeError):
            cast(x, "int32")

    def test_complex(self):
        # complex(re, im) builds a complex vector whose parts round-trip.
        rng = np.random.default_rng(2333)
        m = fmatrix()
        c = at_complex(m[0], m[1])
        assert c.type == cvector
        r, i = [real(c), imag(c)]
        assert r.type == fvector
        assert i.type == fvector
        f = pytensor.function([m], [r, i])
        mval = np.asarray(rng.standard_normal((2, 5)), dtype="float32")
        rval, ival = f(mval)
        assert np.all(rval == mval[0]), (rval, mval[0])
        assert np.all(ival == mval[1]), (ival, mval[1])

    @pytest.mark.skip(reason="Complex grads not enabled, see #178")
    def test_complex_grads(self):
        def f(m):
            c = at_complex(m[0], m[1])
            return 0.5 * real(c) + 0.9 * imag(c)
        rng = np.random.default_rng(9333)
        mval = np.asarray(rng.standard_normal((2, 5)))
        utt.verify_grad(f, [mval])

    @pytest.mark.skip(reason="Complex grads not enabled, see #178")
    def test_mul_mixed0(self):
        def f(a):
            ac = at_complex(a[0], a[1])
            return abs((ac) ** 2).sum()
        rng = np.random.default_rng(9333)
        aval = np.asarray(rng.standard_normal((2, 5)))
        try:
            utt.verify_grad(f, [aval])
        except GradientError as e:
            # Dump numeric vs analytic gradients before re-raising for debugging.
            print(e.num_grad.gf)
            print(e.analytic_grad)
            raise

    @pytest.mark.skip(reason="Complex grads not enabled, see #178")
    def test_mul_mixed1(self):
        def f(a):
            ac = at_complex(a[0], a[1])
            return abs(ac).sum()
        rng = np.random.default_rng(9333)
        aval = np.asarray(rng.standard_normal((2, 5)))
        try:
            utt.verify_grad(f, [aval])
        except GradientError as e:
            print(e.num_grad.gf)
            print(e.analytic_grad)
            raise

    @pytest.mark.skip(reason="Complex grads not enabled, see #178")
    def test_mul_mixed(self):
        def f(a, b):
            ac = at_complex(a[0], a[1])
            return abs((ac * b) ** 2).sum()
        rng = np.random.default_rng(9333)
        aval = np.asarray(rng.standard_normal((2, 5)))
        bval = rng.standard_normal(5)
        try:
            utt.verify_grad(f, [aval, bval])
        except GradientError as e:
            print(e.num_grad.gf)
            print(e.analytic_grad)
            raise

    @pytest.mark.skip(reason="Complex grads not enabled, see #178")
    def test_polar_grads(self):
        def f(m):
            c = complex_from_polar(abs(m[0]), m[1])
            return 0.5 * real(c) + 0.9 * imag(c)
        rng = np.random.default_rng(9333)
        mval = np.asarray(rng.standard_normal((2, 5)))
        utt.verify_grad(f, [mval])

    @pytest.mark.skip(reason="Complex grads not enabled, see #178")
    def METHOD_NAME(self):
        # Gradient of abs() on a complex value.
        def f(m):
            c = at_complex(m[0], m[1])
            return 0.5 * abs(c)
        rng = np.random.default_rng(9333)
        mval = np.asarray(rng.standard_normal((2, 5)))
        utt.verify_grad(f, [mval])
298,614 | read graph meta | from abc import ABC
from abc import abstractmethod
class SimpleClient(ABC):
    """
    Abstract class for interacting with backend data store where the chunkedgraph is stored.
    Eg., BigTableClient for using big table as storage.
    """
    @abstractmethod
    def create_graph(self) -> None:
        """Initialize the graph and store associated meta."""

    @abstractmethod
    def add_graph_version(self, version):
        """Add a version to the graph."""

    @abstractmethod
    def read_graph_version(self):
        """Read stored graph version."""

    @abstractmethod
    def update_graph_meta(self, meta):
        """Update stored graph meta."""

    @abstractmethod
    def METHOD_NAME(self):
        """Read stored graph meta."""

    @abstractmethod
    def read_nodes(
        self,
        start_id=None,
        end_id=None,
        node_ids=None,
        properties=None,
        start_time=None,
        end_time=None,
        end_time_inclusive=False,
    ):
        """
        Read nodes and their properties.
        Accepts a range of node IDs or specific node IDs.
        """
        # NOTE(review): implementations presumably treat (start_id, end_id)
        # and node_ids as mutually exclusive — not enforced here; confirm.

    @abstractmethod
    def read_node(
        self,
        node_id,
        properties=None,
        start_time=None,
        end_time=None,
        end_time_inclusive=False,
    ):
        """Read a single node and its properties."""

    @abstractmethod
    def write_nodes(self, nodes):
        """Writes/updates nodes (IDs along with properties)."""

    @abstractmethod
    def lock_root(self, node_id, operation_id):
        """Locks root node with operation_id to prevent race conditions."""

    @abstractmethod
    def lock_roots(self, node_ids, operation_id):
        """Locks root nodes to prevent race conditions."""

    @abstractmethod
    def lock_root_indefinitely(self, node_id, operation_id):
        """Locks root node with operation_id to prevent race conditions."""

    @abstractmethod
    def lock_roots_indefinitely(self, node_ids, operation_id):
        """
        Locks root nodes indefinitely to prevent structural damage to graph.
        This scenario is rare and needs asynchronous fix or inspection to unlock.
        """

    @abstractmethod
    def unlock_root(self, node_id, operation_id):
        """Unlocks root node that is locked with operation_id."""

    @abstractmethod
    def unlock_indefinitely_locked_root(self, node_id, operation_id):
        """Unlocks root node that is indefinitely locked with operation_id."""

    @abstractmethod
    def renew_lock(self, node_id, operation_id):
        """Renews existing node lock with operation_id for extended time."""

    @abstractmethod
    def renew_locks(self, node_ids, operation_id):
        """Renews existing node locks with operation_id for extended time."""

    @abstractmethod
    def get_lock_timestamp(self, node_ids, operation_id):
        """Reads timestamp from lock row to get a consistent timestamp."""

    @abstractmethod
    def get_consolidated_lock_timestamp(self, root_ids, operation_ids):
        """Minimum of multiple lock timestamps."""

    @abstractmethod
    def get_compatible_timestamp(self, time_stamp):
        """Datetime time stamp compatible with client's services."""
class ClientWithIDGen(SimpleClient):
    """
    Abstract class for client to backend data store that has support for generating IDs.
    If not, something else can be used but these methods need to be implemented.
    Eg., Big Table row cells can be used to generate unique IDs.
    """
    @abstractmethod
    def create_node_ids(self, chunk_id):
        """Generate a range of unique IDs in the chunk."""

    @abstractmethod
    def create_node_id(self, chunk_id):
        """Generate a unique ID in the chunk."""

    @abstractmethod
    def get_max_node_id(self, chunk_id):
        """Gets the current maximum node ID in the chunk."""

    @abstractmethod
    def create_operation_id(self):
        """Generate a unique operation ID."""

    @abstractmethod
    def get_max_operation_id(self):
        """Gets the current maximum operation ID."""
class OperationLogger(ABC):
    """
    Abstract class for interacting with backend data store where the operation logs are stored.
    Eg., BigTableClient can be used to store logs in Google BigTable.
    """
    # TODO add functions for writing
    @abstractmethod
    def read_log_entry(self, operation_id: int) -> None:
        """Read log entry for a given operation ID."""
        # NOTE(review): a read method annotated `-> None` looks wrong —
        # implementations presumably return the entry; confirm and fix.

    @abstractmethod
    def read_log_entries(self, operation_ids) -> None:
        """Read log entries for given operation IDs."""
298,615 | test ensure coverage | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import sys
import datetime
import os
import platform
import warnings
from report import AutoRestReportService
class TestAcceptance(object):
    """Coverage-report acceptance check against a local AutoRest test server."""

    def METHOD_NAME(self):
        """Fetch the coverage reports from the test server and verify that every
        scenario not explicitly excused below was executed at least once."""
        client = AutoRestReportService(base_url="http://localhost:3000")
        report = client.get_report(platform.python_version())
        optional_report = client.get_optional_report(platform.python_version())
        # Add tests that wont be supported due to the nature of Python here
        not_supported = {}
        # Please add missing features or failing tests here
        missing_features_or_bugs = {
            'ConstantsInBody': 1,  # https://github.com/Azure/autorest.modelerfour/issues/83
            "ResponsesScenarioF400DefaultModel": 1,
            "ResponsesScenarioF400DefaultNone": 1,
        }
        for name in report:
            if name[:3].lower() == 'dpg':
                missing_features_or_bugs[name] = 1  # no DPG tests for legacy
        print("Coverage:")
        self._print_report(report, not_supported, missing_features_or_bugs)

        missing_features_or_bugs = {
            "putDateTimeMaxLocalNegativeOffset": 1,  # Python doesn't support year 1000
            "putDateTimeMinLocalPositiveOffset": 1,  # Python doesn't support BC time
            'putDateTimeMaxUtc7MS': 1,  # Python doesn't support 7 digits ms datetime
            'FormdataStreamUploadFile': 1,  # Form data not supported yet
            'StreamUploadFile': 1,  # Form data not supported yet
        }
        for name in optional_report:
            if "Options" in name:
                # https://github.com/Azure/azure-sdk-for-python/pull/9322
                missing_features_or_bugs[name] = 1
            if "Multiapi" in name:
                # multiapi is in a separate test folder
                missing_features_or_bugs[name] = 1
            if "DPG" in name:
                # dpg is in a separate test folder
                missing_features_or_bugs[name] = 1
        print("Optional coverage:")
        self._print_report(optional_report, not_supported, missing_features_or_bugs,
                           fail_if_missing=False)

    def _print_report(self, report, not_supported=None, missing_features_or_bugs=None,
                      fail_if_missing=True):
        """Merge the exclusion dicts into ``report`` (mutated in place), print the
        ignored/pending/never-executed scenario names, and emit a coverage warning.

        :param dict report: scenario name -> execution count; updated in place.
        :param dict not_supported: scenarios Python cannot support (marked IGNORING).
        :param dict missing_features_or_bugs: known gaps (marked PENDING).
        :param bool fail_if_missing: raise AssertionError if any scenario has a
            zero count after the exclusions were merged in.
        """
        if not_supported:
            report.update(not_supported)
            for name in not_supported:
                print("IGNORING {0}".format(name))
        if missing_features_or_bugs:
            report.update(missing_features_or_bugs)
            for name in missing_features_or_bugs:
                print("PENDING {0}".format(name))
        failed = [k for k, v in report.items() if v == 0]
        for name in failed:
            print("FAILED TO EXECUTE {0}".format(name))
        total_tests = len(report)
        warnings.warn("The test coverage is {0}/{1}.".format(total_tests - len(failed), total_tests))
        if fail_if_missing:
            assert 0 == len(failed)
298,616 | axis reverse | """
Functions for acting on a axis of an array.
"""
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
    """Slice an array along a single axis, leaving every other axis intact.

    Parameters
    ----------
    a : numpy.ndarray
        The array to be sliced.
    start, stop, step : int or None
        Parameters passed to ``slice(start, stop, step)``.
    axis : int, optional
        The axis of `a` along which to slice. Default is -1 (last axis).

    Returns
    -------
    numpy.ndarray
        A view of `a` restricted to ``slice(start, stop, step)`` on `axis`.

    Examples
    --------
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> axis_slice(a, start=0, stop=1, axis=1)
    array([[1],
           [4],
           [7]])
    >>> axis_slice(a, start=1, axis=0)
    array([[4, 5, 6],
           [7, 8, 9]])

    Notes
    -----
    Because the arguments are fed to ``slice(...)``, selecting the single
    index ``k`` is written ``axis_slice(a, start=k, stop=k + 1)``; the
    resulting axis keeps length 1 (use ``numpy.squeeze`` to drop it).
    """
    # Full slices everywhere except the requested axis.
    indexer = [slice(None)] * a.ndim
    indexer[axis] = slice(start, stop, step)
    return a[tuple(indexer)]
def METHOD_NAME(a, axis=-1):
    """Reverse the 1-D slices of `a` along axis `axis`.

    Returns axis_slice(a, step=-1, axis=axis), i.e. a reversed view
    (no copy is made).
    """
    return axis_slice(a, step=-1, axis=axis)
def odd_ext(x, n, axis=-1):
    """Extend `x` at both ends of an axis by odd (180-degree) reflection.

    The extension rotates the data "180 degrees" around each endpoint:
    each appended sample is ``2*endpoint - mirrored_sample``.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        Number of elements appended at each end of `axis`; values below 1
        return `x` unchanged.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Raises
    ------
    ValueError
        If `n` exceeds ``x.shape[axis] - 1``.

    Examples
    --------
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> odd_ext(a, 2)
    array([[-1,  0,  1,  2,  3,  4,  5,  6,  7],
           [-4, -1,  0,  1,  4,  9, 16, 23, 28]])
    """
    if n < 1:
        return x
    max_ext = x.shape[axis] - 1
    if n > max_ext:
        raise ValueError(("The extension length n (%d) is too big. " +
                          "It must not exceed x.shape[axis]-1, which is %d.")
                         % (n, max_ext))
    first = axis_slice(x, start=0, stop=1, axis=axis)
    last = axis_slice(x, start=-1, axis=axis)
    # Reflect about the endpoints: 2*endpoint - mirrored interior samples.
    head = 2 * first - axis_slice(x, start=n, stop=0, step=-1, axis=axis)
    tail = 2 * last - axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
    return np.concatenate((head, x, tail), axis=axis)
def even_ext(x, n, axis=-1):
    """Extend `x` at both ends of an axis by even (mirror-image) reflection.

    The boundary samples themselves are not repeated; the extension is the
    mirror image of the interior samples adjacent to each end.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        Number of elements appended at each end of `axis`; values below 1
        return `x` unchanged.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Raises
    ------
    ValueError
        If `n` exceeds ``x.shape[axis] - 1``.

    Examples
    --------
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> even_ext(a, 2)
    array([[ 3,  2,  1,  2,  3,  4,  5,  4,  3],
           [ 4,  1,  0,  1,  4,  9, 16,  9,  4]])
    """
    if n < 1:
        return x
    max_ext = x.shape[axis] - 1
    if n > max_ext:
        raise ValueError(("The extension length n (%d) is too big. " +
                          "It must not exceed x.shape[axis]-1, which is %d.")
                         % (n, max_ext))
    # Mirrors exclude the boundary sample itself (start=n..1 and -2..-(n+1)).
    mirrored_head = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
    mirrored_tail = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
    return np.concatenate((mirrored_head, x, mirrored_tail), axis=axis)
def const_ext(x, n, axis=-1):
    """Extend `x` at both ends of an axis by repeating the edge values.

    The first and last samples along `axis` are replicated `n` times on
    their respective sides.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        Number of elements appended at each end of `axis`; values below 1
        return `x` unchanged.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> const_ext(a, 2)
    array([[ 1,  1,  1,  2,  3,  4,  5,  5,  5],
           [ 0,  0,  0,  1,  4,  9, 16, 16, 16]])
    """
    if n < 1:
        return x
    # Broadcast each boundary slice against an (… n …) block of ones to
    # replicate it n times along `axis`.
    pad_shape = [1] * x.ndim
    pad_shape[axis] = n
    repeat = np.ones(pad_shape, dtype=x.dtype)
    head = repeat * axis_slice(x, start=0, stop=1, axis=axis)
    tail = repeat * axis_slice(x, start=-1, axis=axis)
    return np.concatenate((head, x, tail), axis=axis)
def zero_ext(x, n, axis=-1):
    """Extend `x` at both ends of an axis with zeros.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        Number of zeros appended at each end of `axis`; values below 1
        return `x` unchanged.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> zero_ext(a, 2)
    array([[ 0,  0,  1,  2,  3,  4,  5,  0,  0],
           [ 0,  0,  0,  1,  4,  9, 16,  0,  0]])
    """
    if n < 1:
        return x
    # Pad only the requested axis; dtype is preserved by np.pad.
    widths = [(0, 0)] * x.ndim
    widths[axis] = (n, n)
    return np.pad(x, widths, mode="constant", constant_values=0)
298,617 | get project history | import datetime
import pandas as pd
from mapswipe_workers.definitions import logger
def calc_results_progress(
    number_of_users: int,
    number_of_users_required: int,
    cum_number_of_users: int,
    number_of_tasks: int,
    number_of_results: int,
) -> int:
    """Return how many of this group's results count toward project progress.

    Results only contribute while a task still needs more users: once the
    required number of users has been reached, any further results are
    ignored for progress purposes.
    """
    users_before_today = cum_number_of_users - number_of_users
    if cum_number_of_users <= number_of_users_required:
        # Still below the requirement — every result counts.
        return number_of_results
    if users_before_today < number_of_users_required:
        # Requirement was crossed today — only the users that filled the
        # remaining quota contribute, one full task set each.
        return (number_of_users_required - users_before_today) * number_of_tasks
    # Requirement was already met before today — nothing counts.
    return 0
def is_new_user(day: datetime.datetime, first_day: datetime.datetime):
    """Return 1 when `day` is the user's first active day on this project,
    else 0 (integer flags so results can be summed downstream)."""
    return 1 if day == first_day else 0
def get_progress_by_date(
    results_df: pd.DataFrame, groups_df: pd.DataFrame
) -> pd.DataFrame:
    """Retrospectively compute per-day progress attributes for a project.

    Produces one row per day with:

    - ``number_of_results``: results contributed that day (not used in firebase)
    - ``cum_number_of_results``: running sum of daily results
    - ``progress``: relative progress added that day (not used in firebase)
    - ``cum_progress``: absolute progress up to that day (firebase ``progress``)

    Results beyond a group's required number of users do not increase
    progress (see :func:`calc_results_progress`).

    .. note:: mutates the caller's ``groups_df`` by adding a
       ``required_results`` column.
    """
    # Denominator for progress: every task answered by the required user count.
    groups_df["required_results"] = (
        groups_df["number_of_tasks"] * groups_df["number_of_users_required"]
    )
    required_results = groups_df["required_results"].sum()
    # Fixed typo in log message ("calcuated" -> "calculated").
    logger.info(f"calculated required results: {required_results}")
    results_with_groups_df = results_df.merge(
        groups_df, left_on="group_id", right_on="group_id"
    )
    # Per (project, group, day): group metadata plus distinct contributing users.
    # "project_id_x" is the results-side column after the merge suffixing.
    results_by_group_id_df = results_with_groups_df.groupby(
        ["project_id_x", "group_id", "day"]
    ).agg(
        number_of_tasks=pd.NamedAgg(column="number_of_tasks", aggfunc="min"),
        number_of_users_required=pd.NamedAgg(
            column="number_of_users_required", aggfunc="min"
        ),
        number_of_users=pd.NamedAgg(column="user_id", aggfunc=pd.Series.nunique),
    )
    results_by_group_id_df["number_of_results"] = (
        results_by_group_id_df["number_of_users"]
        * results_by_group_id_df["number_of_tasks"]
    )
    # Running user count per group, needed to cap progress contributions.
    results_by_group_id_df["cum_number_of_users"] = (
        results_by_group_id_df["number_of_users"]
        .groupby(["project_id_x", "group_id"])
        .cumsum()
    )
    results_by_group_id_df["number_of_results_progress"] = results_by_group_id_df.apply(
        lambda row: calc_results_progress(
            row["number_of_users"],
            row["number_of_users_required"],
            row["cum_number_of_users"],
            row["number_of_tasks"],
            row["number_of_results"],
        ),
        axis=1,
    )
    # Collapse groups: daily totals across the whole project.
    progress_by_date_df = (
        results_by_group_id_df.reset_index()
        .groupby(["day"])
        .agg(
            number_of_results=pd.NamedAgg(column="number_of_results", aggfunc="sum"),
            number_of_results_progress=pd.NamedAgg(
                column="number_of_results_progress", aggfunc="sum"
            ),
        )
    )
    progress_by_date_df["cum_number_of_results"] = progress_by_date_df[
        "number_of_results"
    ].cumsum()
    progress_by_date_df["cum_number_of_results_progress"] = progress_by_date_df[
        "number_of_results_progress"
    ].cumsum()
    progress_by_date_df["progress"] = (
        progress_by_date_df["number_of_results_progress"] / required_results
    )
    progress_by_date_df["cum_progress"] = (
        progress_by_date_df["cum_number_of_results_progress"] / required_results
    )
    logger.info("calculated progress by date")
    return progress_by_date_df
def get_contributors_by_date(results_df: pd.DataFrame) -> pd.DataFrame:
    """
    for each project we retrospectively generate the following attributes for a given
    date utilizing the results:

    number_of_users:
        - number of distinct users active per day
        - not used in firebase
    number_of_new_users:
        - number of distinct users who mapped the first group in that project per day
        - not used in firebase
    cum_number_of_users:
        - overall number of distinct users active up to that day
        - refers to the project contributorCount attribute in firebase
    """
    # Earliest active day per user — a user is "new" only on this day.
    user_first_day_df = results_df.groupby(["user_id"]).agg(
        first_day=pd.NamedAgg(column="day", aggfunc="min")
    )
    logger.info("calculated first day per user")
    results_by_user_id_df = results_df.groupby(["project_id", "user_id", "day"]).agg(
        number_of_results=pd.NamedAgg(column="user_id", aggfunc="count")
    )
    results_by_user_id_df = results_by_user_id_df.reset_index().merge(
        user_first_day_df, left_on="user_id", right_on="user_id"
    )
    # Flag rows where the user's day equals their first_day (1/0 ints for summing).
    results_by_user_id_df["new_user"] = results_by_user_id_df.apply(
        lambda row: is_new_user(row["day"], row["first_day"]), axis=1
    )
    contributors_by_date_df = (
        results_by_user_id_df.reset_index()
        .groupby(["project_id", "day"])
        .agg(
            number_of_users=pd.NamedAgg(column="user_id", aggfunc=pd.Series.nunique),
            number_of_new_users=pd.NamedAgg(column="new_user", aggfunc="sum"),
        )
    )
    # Cumulative sum of NEW users = distinct users seen so far.
    contributors_by_date_df["cum_number_of_users"] = contributors_by_date_df[
        "number_of_new_users"
    ].cumsum()
    logger.info("calculated contributors by date")
    return contributors_by_date_df
def METHOD_NAME(
    results_df: pd.DataFrame, groups_df: pd.DataFrame
) -> pd.DataFrame:
    """
    Calculate the progress df for every day based on results and groups df.
    Then calculate the contributors for every day based on results df.
    Merge both dataframes on their "day" column/level.
    Return project history dataframe.

    Parameters
    ----------
    results_df
    groups_df
    """
    # calculate progress by date
    progress_by_date_df = get_progress_by_date(results_df, groups_df)
    # calculate contributors by date
    contributors_by_date_df = get_contributors_by_date(results_df)
    # merge contributors and progress
    project_history_df = progress_by_date_df.merge(
        contributors_by_date_df, left_on="day", right_on="day"
    )
    return project_history_df
298,618 | test | import numpy as np
import unittest
from numba.core.compiler import compile_isolated
from numba.core.errors import TypingError
from numba import jit, typeof
from numba.core import types
from numba.tests.support import skip_m1_llvm_rtdyld_failure
# Module-level arrays frozen into the jitted getitem* functions below;
# each exercises a different layout/dtype case.
a0 = np.array(42)  # 0-d array
s1 = np.int32(64)  # NumPy scalar constant
a1 = np.arange(12)  # 1-d contiguous
a2 = a1[::2]  # 1-d non-contiguous view of a1
a3 = a1.reshape((3, 4)).T  # 2-d transposed view
dt = np.dtype([('x', np.int8), ('y', 'S3')])
a4 = np.arange(32, dtype=np.int8).view(dt)  # record array, contiguous
a5 = a4[::-2]  # record array, non-contiguous (reversed, strided)
# A recognizable data string
a6 = np.frombuffer(b"XXXX_array_contents_XXXX", dtype=np.float32)
myarray = np.array([1, ])  # mutated by write_to_global_array()
def getitem0(i):
    # Index the 0-d global a0 with an empty tuple; `i` is unused.
    return a0[()]
def getitem1(i):
    # Index the 1-d contiguous global a1.
    return a1[i]
def getitem2(i):
    # Index the 1-d non-contiguous global a2.
    return a2[i]
def getitem3(i):
    # Index the 2-d (transposed view) global a3; returns a row.
    return a3[i]
def getitem4(i):
    # Index the contiguous record-array global a4.
    return a4[i]
def getitem5(i):
    # Index the non-contiguous record-array global a5.
    return a5[i]
def getitem6(i):
    # Index the float32 global a6 (recognizable bytes, used by the
    # alignment test to find the constant in the LLVM IR).
    return a6[i]
def use_arrayscalar_const():
    # Return the global NumPy scalar s1 (array-scalar constant case).
    return s1
def write_to_global_array():
    # Mutates the module-level `myarray`; typing this is expected to fail
    # (see test_write_to_global_array, which asserts TypingError).
    myarray[0] = 1
def bytes_as_const_array():
    """Return the constant bytes literal ``b'foo'`` viewed as a uint8 array."""
    raw = b'foo'
    return np.frombuffer(raw, dtype=np.uint8)
class TestConstantArray(unittest.TestCase):
    """
    Test array constants: module-level arrays frozen into jitted code must
    behave exactly like their Python counterparts.
    """
    def check_array_const(self, pyfunc):
        # Compile pyfunc with an int32 index and compare jitted vs pure-Python
        # results for a few indices.
        cres = compile_isolated(pyfunc, (types.int32,))
        cfunc = cres.entry_point
        for i in [0, 1, 2]:
            np.testing.assert_array_equal(pyfunc(i), cfunc(i))

    def test_array_const_0d(self):
        self.check_array_const(getitem0)

    def test_array_const_1d_contig(self):
        self.check_array_const(getitem1)

    def test_array_const_1d_noncontig(self):
        self.check_array_const(getitem2)

    def test_array_const_2d(self):
        self.check_array_const(getitem3)

    def test_record_array_const_contig(self):
        self.check_array_const(getitem4)

    def test_record_array_const_noncontig(self):
        self.check_array_const(getitem5)

    def test_array_const_alignment(self):
        """
        Issue #1933: the array declaration in the LLVM IR must have
        the right alignment specified.
        """
        sig = (types.intp,)
        cfunc = jit(sig, nopython=True)(getitem6)
        ir = cfunc.inspect_llvm(sig)
        # Locate the frozen a6 constant by its recognizable byte pattern.
        for line in ir.splitlines():
            if 'XXXX_array_contents_XXXX' in line:
                self.assertIn("constant [24 x i8]", line)  # sanity check
                # Should be the ABI-required alignment for float32
                # on most platforms...
                self.assertIn(", align 4", line)
                break
        else:
            self.fail("could not find array declaration in LLVM IR")

    def test_arrayscalar_const(self):
        pyfunc = use_arrayscalar_const
        cres = compile_isolated(pyfunc, ())
        cfunc = cres.entry_point
        self.assertEqual(pyfunc(), cfunc())

    def test_write_to_global_array(self):
        # Writing to a frozen global array must be rejected at typing time.
        pyfunc = write_to_global_array
        with self.assertRaises(TypingError):
            compile_isolated(pyfunc, ())

    def test_issue_1850(self):
        """
        This issue is caused by an unresolved bug in numpy since version 1.6.
        See numpy GH issue #3147.
        """
        constarr = np.array([86])
        def pyfunc():
            return constarr[0]
        cres = compile_isolated(pyfunc, ())
        out = cres.entry_point()
        self.assertEqual(out, 86)

    @skip_m1_llvm_rtdyld_failure
    def test_too_big_to_freeze(self):
        """
        Test issue https://github.com/numba/numba/issues/2188 where freezing
        a constant array into the code that's prohibitively long and consumes
        too much RAM.
        """
        def METHOD_NAME(biggie):
            expect = np.copy(biggie)
            self.assertEqual(typeof(biggie), typeof(expect))
            def pyfunc():
                return biggie
            cres = compile_isolated(pyfunc, ())
            # Check that the array is not frozen into the LLVM IR.
            # LLVM size must be less than the array size.
            self.assertLess(len(cres.library.get_llvm_str()), biggie.nbytes)
            # Run and test result
            out = cres.entry_point()
            self.assertIs(biggie, out)
            # Remove all local references to biggie
            del out
            biggie = None  # del biggie is syntax error in py2
            # Run again and verify result
            out = cres.entry_point()
            np.testing.assert_equal(expect, out)
            self.assertEqual(typeof(expect), typeof(out))
        nelem = 10**7  # 10 million items
        c_array = np.arange(nelem).reshape(nelem)
        f_array = np.asfortranarray(np.random.random((2, nelem // 2)))
        self.assertEqual(typeof(c_array).layout, 'C')
        self.assertEqual(typeof(f_array).layout, 'F')
        # Test C contig
        METHOD_NAME(c_array)
        # Test F contig
        METHOD_NAME(f_array)
class TestConstantBytes(unittest.TestCase):
    """A constant bytes literal viewed as an array must survive jitting."""

    def test_constant_bytes(self):
        pyfunc = bytes_as_const_array
        cres = compile_isolated(pyfunc, ())
        cfunc = cres.entry_point
        np.testing.assert_array_equal(pyfunc(), cfunc())
unittest.main() |
298,619 | best hamming | import sys
"""
reference: dict_trie
"""
if sys.version_info.major < 3:
from itertools import imap as map
def _add(root, word, count):
"""Add a word to a trie.
:arg dict root: Root of the trie.
:arg str word: A word.
:arg int count: Multiplicity of `word`.
"""
node = root
for char in word:
if char not in node:
node[char] = {}
node = node[char]
if '' not in node:
node[''] = 0
node[''] += count
def _find(root, word):
"""Find the node after following the path in a trie given by {word}.
:arg dict root: Root of the trie.
:arg str word: A word.
:returns dict: The node if found, {} otherwise.
"""
node = root
for char in word:
if char not in node:
return {}
node = node[char]
return node
def _remove(node, word, count):
"""Remove a word from a trie.
:arg dict node: Current node.
:arg str word: Word to be removed.
:arg int count: Multiplicity of `word`, force remove if this is -1.
:returns bool: True if the last occurrence of `word` is removed.
"""
if not word:
if '' in node:
node[''] -= count
if node[''] < 1 or count == -1:
node.pop('')
return True
return False
car, cdr = word[0], word[1:]
if car not in node:
return False
result = _remove(node[car], cdr, count)
if result:
if not node[car]:
node.pop(car)
return result
def _iterate(path, node, unique):
"""Convert a trie into a list.
:arg str path: Path taken so far to reach the current node.
:arg dict node: Current node.
:arg bool unique: Do not list multiplicities.
:returns iter: All words in a trie.
"""
if '' in node:
if not unique:
for _ in range(1, node['']):
yield path
yield path
for char in node:
if char:
for result in _iterate(path + char, node[char], unique):
yield result
def _fill(node, alphabet, length):
"""Make a full trie using the characters in {alphabet}.
:arg dict node: Current node.
:arg tuple alphabet: Used alphabet.
:arg int length: Length of the words to be generated.
:returns iter: Trie containing all words of length {length} over alphabet
{alphabet}.
"""
if not length:
node[''] = 1
return
for char in alphabet:
node[char] = {}
_fill(node[char], alphabet, length - 1)
def _hamming(path, node, word, distance, cigar):
"""Find all paths in a trie that are within a certain hamming distance of
{word}.
:arg str path: Path taken so far to reach the current node.
:arg dict node: Current node.
:arg str word: Query word.
:arg int distance: Amount of allowed errors.
:returns iter: All words in a trie that have Hamming distance of at most
{distance} to {word}.
"""
if distance < 0:
return
if not word:
if '' in node:
yield (path, distance, cigar)
return
car, cdr = word[0], word[1:]
for char in node:
if char:
if char == car:
penalty = 0
operation = '='
else:
penalty = 1
operation = 'X'
for result in _hamming(
path + char, node[char], cdr, distance - penalty,
cigar + operation):
yield result
def _levenshtein(path, node, word, distance, cigar):
    """Find all paths in a trie that are within a certain Levenshtein
    distance of {word}.

    :arg str path: Path taken so far to reach the current node.
    :arg dict node: Current node.
    :arg str word: Query word.
    :arg int distance: Amount of allowed errors.
    :arg str cigar: CIGAR-like alignment string built so far.

    :returns iter: All words in a trie that have Levenshtein distance of at
        most {distance} to {word}, as (word, remaining distance, cigar)
        tuples.
    """
    if distance < 0:
        # Error budget exhausted; abandon this branch.
        return
    if not word:
        if '' in node:
            yield (path, distance, cigar)
        # Keep exploring: insertions may still produce matches below.
        car, cdr = '', ''
    else:
        car, cdr = word[0], word[1:]

    # Deletion.
    for result in _levenshtein(path, node, cdr, distance - 1, cigar + 'D'):
        yield result

    for char in node:
        if char:
            # Substitution (or exact match when the characters agree).
            if car:
                if char == car:
                    penalty = 0
                    operation = '='
                else:
                    penalty = 1
                    operation = 'X'
                for result in _levenshtein(
                        path + char, node[char], cdr, distance - penalty,
                        cigar + operation):
                    yield result
            # Insertion.
            for result in _levenshtein(
                    path + char, node[char], word, distance - 1, cigar + 'I'):
                yield result
class Trie(object):
    """Trie (prefix tree) with multiplicities and approximate matching
    via Hamming and Levenshtein distance.
    """

    def __init__(self, words=None):
        """Initialise the class.

        :arg list words: List of words.
        """
        self.root = {}

        if words:
            for word in words:
                self.add(word)

    def __contains__(self, word):
        # A word is present iff following its path ends at a terminal node.
        return '' in _find(self.root, word)

    def __iter__(self):
        return _iterate('', self.root, True)

    def list(self, unique=True):
        """Iterate over all words, optionally repeating multiplicities."""
        return _iterate('', self.root, unique)

    def add(self, word, count=1):
        """Add `word` with multiplicity `count`."""
        _add(self.root, word, count)

    def get(self, word):
        """Return the multiplicity of `word`, or None if absent."""
        node = _find(self.root, word)
        if '' in node:
            return node['']
        return None

    def remove(self, word, count=1):
        """Remove `count` occurrences of `word` (force with count=-1)."""
        return _remove(self.root, word, count)

    def has_prefix(self, word):
        """Return True if any stored word starts with `word`."""
        return _find(self.root, word) != {}

    def fill(self, alphabet, length):
        """Insert every word of `length` over `alphabet`."""
        _fill(self.root, alphabet, length)

    def all_hamming_(self, word, distance):
        # Yields (word, distance used, cigar); note the distance is
        # converted from "remaining budget" to "errors consumed".
        return map(
            lambda x: (x[0], distance - x[1], x[2]),
            _hamming('', self.root, word, distance, ''))

    def all_hamming(self, word, distance):
        """Iterate over all words within Hamming `distance` of `word`."""
        return map(
            lambda x: x[0], _hamming('', self.root, word, distance, ''))

    def hamming(self, word, distance):
        """Return one word within Hamming `distance` of `word`, or None."""
        try:
            return next(self.all_hamming(word, distance))
        except StopIteration:
            return None

    def METHOD_NAME(self, word, distance):
        """Find the best match with {word} in a trie.

        Tries an exact match first, then increasing Hamming distances.
        (NOTE(review): placeholder name; this mirrors best_levenshtein
        for the Hamming metric.)

        :arg str word: Query word.
        :arg int distance: Maximum allowed distance.

        :returns str: Best match with {word}.
        """
        if self.get(word):
            return word
        for i in range(1, distance + 1):
            result = self.hamming(word, i)
            if result is not None:
                return result
        return None

    def all_levenshtein_(self, word, distance):
        # Yields (word, distance used, cigar), like all_hamming_.
        return map(
            lambda x: (x[0], distance - x[1], x[2]),
            _levenshtein('', self.root, word, distance, ''))

    def all_levenshtein(self, word, distance):
        """Iterate over words within Levenshtein `distance` of `word`."""
        return map(
            lambda x: x[0], _levenshtein('', self.root, word, distance, ''))

    def levenshtein(self, word, distance):
        """Return one word within Levenshtein `distance`, or None."""
        try:
            return next(self.all_levenshtein(word, distance))
        except StopIteration:
            return None

    def best_levenshtein(self, word, distance):
        """Find the best match with {word} in a trie.

        Tries an exact match first, then increasing Levenshtein distances.

        :arg str word: Query word.
        :arg int distance: Maximum allowed distance.

        :returns str: Best match with {word}.
        """
        if self.get(word):
            return word
        for i in range(1, distance + 1):
            result = self.levenshtein(word, i)
            if result is not None:
                return result
        return None
298,620 | tzname |
# Copyright 2010-2016 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''Thin wrapper for 'datetime' module from the standard library.
Provides timezone info for the local time. Based on example code
from standard library datetime documentation.
Main usage of this module is the function L{now()}. It imports all
from the standard datetime, so it can be used as a transparant
replacement.
Also adds a L{strfcal()} method and extends L{strftime()} to deal
with weeknumbers correctly.
'''
import re
import locale
from datetime import *
import logging
logger = logging.getLogger('zim')
def now():
    '''Like C{datetime.now()} but with local timezone info'''
    # Zero the microseconds so isoformat() output stays compact.
    local_now = datetime.now(LocalTimezone())
    return local_now.replace(microsecond=0)
# A class capturing the platform's idea of local time.

import time as _time

ZERO = timedelta(0)  # no offset; what dst() returns outside DST
# Offset of local standard time from UTC. time.timezone counts seconds
# west of UTC, hence the sign flip.
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
    # Offset of local daylight-saving time, when the platform defines one.
    DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
    DSTOFFSET = STDOFFSET

# The DST correction itself (typically one hour).
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
'''Implementation of tzinfo with the current time zone, based on
the platform's idea of local time
'''
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def METHOD_NAME(self, dt):
return _time.METHOD_NAME[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
# Initialize setting for first day of the week. This is locale
# dependent, and the Gtk.Calendar widget already has good code to find it out.
# Unfortunately, the widget keeps that data private *%#*$()()*) !

MONDAY = 0 # iso calendar starts week at Monday
SUNDAY = 6
FIRST_DAY_OF_WEEK = None  # lazily resolved by init_first_day_of_week()

def init_first_day_of_week():
    '''Resolve the locale's first day of the week.

    Sets the module global C{FIRST_DAY_OF_WEEK} to C{MONDAY} or
    C{SUNDAY}. Tries the optional C{babel} library first and falls
    back to a translator-provided hint, mirroring Gtk's approach.
    '''
    global FIRST_DAY_OF_WEEK
    try:
        import babel
        mylocale = babel.Locale(locale.getdefaultlocale()[0])
        if mylocale.first_week_day == 0:
            FIRST_DAY_OF_WEEK = MONDAY
        else:
            FIRST_DAY_OF_WEEK = SUNDAY
        logger.debug('According to babel first day of week is %i', FIRST_DAY_OF_WEEK)
    except Exception as e:
        # babel is optional; only log unexpected (non-import) failures.
        if not isinstance(e, ImportError):
            logger.exception('Exception while loading \'babel\' library for first day of week')

        # Fallback gleaned from gtkcalendar.c - hence the inconsistency
        # with weekday numbers in iso calendar...
        t = _("calendar:week_start:0")
        # T: Translate to "calendar:week_start:0" if you want Sunday to be the first day of the week or to "calendar:week_start:1" if you want Monday to be the first day of the week
        if t[-1] == '0':
            FIRST_DAY_OF_WEEK = SUNDAY
        elif t[-1] == '1':
            FIRST_DAY_OF_WEEK = MONDAY
        else:
            logger.warning("Whoever translated 'calendar:week_start:0' did so wrongly.")
            FIRST_DAY_OF_WEEK = SUNDAY
def dates_for_week(year, week):
    '''Returns the first and last day of the week for a given
    week number of a given year.

    @param year: year as int (e.g. 2012)
    @param week: week number as int (0 .. 53)
    @returns: a 2-tuple of:
      - a C{datetime.date} object for the start date of the week
      - a C{datetime.date} object for the end dateof the week

    @note: first day of the week can be either C{MONDAY} or C{SUNDAY},
    this is configured in C{FIRST_DAY_OF_WEEK} based on the locale.
    '''
    # Note that the weeknumber in the isocalendar does NOT depend on the
    # first day being Sunday or Monday, but on the first Thursday in the
    # new year. See datetime.isocalendar() for details.
    # If the year starts with e.g. a Friday, January 1st still belongs
    # to week 53 of the previous year.
    # Day of week in isocalendar starts with 1 for Mon and is 7 for Sun,
    # and week starts on Monday.

    if FIRST_DAY_OF_WEEK is None:
        init_first_day_of_week()

    # Anchor on Jan 1st and compute how far back the start of its week lies.
    jan1 = date(year, 1, 1)
    _, jan1_week, jan1_weekday = jan1.isocalendar()

    if FIRST_DAY_OF_WEEK == MONDAY:
        days = jan1_weekday - 1
        # if Jan 1 is a Monday, days is 0
    else:
        days = jan1_weekday
        # if Jan 1 is a Monday, days is 1
        # for Sunday it becomes 7 (or -1 week)

    if jan1_week == 1:
        weeks = week - 1
    else:
        # Jan 1st is still wk53 of the previous year
        weeks = week

    start = jan1 + timedelta(days=-days, weeks=weeks)
    end = start + timedelta(days=6)
    return start, end
def weekcalendar(date):
    '''Get the year, week number and week day for a specific date.
    Like C{datetime.date.isocalendar()} but takes into account
    C{FIRST_DAY_OF_WEEK} correctly.

    @param date: a C{datetime.date} or C{datetime.datetime} object
    @returns: a year, a week number and a weekday as integers

    The weekday numbering depends on locale, 1 is always first day
    of the week, either a Sunday or a Monday.
    '''
    # Both strftime %W and %U are not correct, they use differnt
    # week number count than the isocalendar. See datetime
    # module for details.
    # In short Jan 1st can still be week 53 of the previous year
    # So we can use isocalendar(), however this does not take
    # into accout FIRST_DAY_OF_WEEK, see comment in dates_for_week()

    if FIRST_DAY_OF_WEEK is None:
        init_first_day_of_week()

    year, week, weekday = date.isocalendar()

    if FIRST_DAY_OF_WEEK == SUNDAY and weekday == 7:
        # iso calendar gives us the week ending this sunday,
        # we want the next week
        monday = date + timedelta(days=1)
        year, week, weekday = monday.isocalendar()
    elif FIRST_DAY_OF_WEEK == SUNDAY:
        # Shift weekday numbering so 1 means Sunday, 2 means Monday, ...
        weekday += 1

    return year, week, weekday
def strfcal(format, date):
    '''Method similar to strftime, but dealing with the weeknumber,
    day of the week and the year of that week.

    Week 1 is the first week where the Thursday is in the new year. So e.g. the
    last day of 2012 is a Monday. And therefore the calendar week for 31 Dec 2012
    is already week 1 2013.

    The locale decides whether a week starts on Monday (as the ISO standard would have
    it) or on Sunday. So depending on your locale Sun 6 Jan 2013 is either still week
    1 or already the first day of week 2.

    Codes supported by this method:

      - C{%w} is replaced by the weekday as a decimal number [1,7], with 1 representing
        either Monday or Sunday depending on the locale
      - C{%W} is replaced by the weeknumber depending on the locale
      - C{%Y} is replaced by the year with century as a decimal number, the year depends
        on the weeknumber depending on the locale
      - C{%%} is replaced by %

    Difference between this method and strftime is that:

      1. It uses locale to determine the first day of the week
      2. It returns the year that goes with the weeknumber
    '''
    # TODO: may want to add version of the codes that allow forcing
    # Monday or Sunday as first day, e.g. using %u %U %X and %v %V %Z
    year, week, weekday = weekcalendar(date)

    def replacefunc(matchobj):
        # Substitute one two-character "%x" code at a time.
        code = matchobj.group(0)
        if code == '%w':
            return str(weekday)
        elif code == '%W':
            return '%02d' % week
        elif code == '%Y':
            return str(year)
        elif code == '%%':
            return '%'
        else:
            return code # ignore unsupported codes

    return re.sub(r'%.', replacefunc, format)
def strftime(format, date):
    '''Format C{date} using C{date.strftime()}.'''
    # TODO: deprecate this function
    formatted = date.strftime(format)
    return formatted
if __name__ == '__main__': #pragma: no cover
    # Small smoke test: report the detected first day of the week and
    # exercise now(), strftime() and strfcal().
    import gettext
    gettext.install('zim', None, names=('_', 'gettext', 'ngettext'))

    init_first_day_of_week()
    if FIRST_DAY_OF_WEEK == SUNDAY:
        print('First day of week: Sunday')
    else:
        print('First day of week: Monday')
    print('Now:', now().isoformat(), strftime("%z, %Z", now()))
    print('Calendar:', strfcal('day %w of week %W %Y', now()))
import pytest
from eth_utils import (
ValidationError,
)
from eth.chains.base import (
MiningChain,
)
from eth.consensus.pow import (
check_pow,
)
from eth.tools.builder.chain import (
arrow_glacier_at,
berlin_at,
build,
byzantium_at,
chain_id,
constantinople_at,
disable_pow_check,
enable_pow_mining,
fork_at,
frontier_at,
genesis,
gray_glacier_at,
homestead_at,
istanbul_at,
latest_mainnet_at,
london_at,
muir_glacier_at,
name,
paris_at,
petersburg_at,
shanghai_at,
spurious_dragon_at,
tangerine_whistle_at,
)
from eth.vm.forks import (
ArrowGlacierVM,
BerlinVM,
ByzantiumVM,
ConstantinopleVM,
FrontierVM,
GrayGlacierVM,
HomesteadVM,
IstanbulVM,
LondonVM,
MuirGlacierVM,
ParisVM,
PetersburgVM,
ShanghaiVM,
SpuriousDragonVM,
TangerineWhistleVM,
)
def test_chain_builder_construct_chain_name():
    # name() should rename the generated chain class.
    chain = build(MiningChain, name("ChainForTest"))
    assert issubclass(chain, MiningChain)
    assert chain.__name__ == "ChainForTest"
def METHOD_NAME():
    """A single fork_at() entry becomes a one-element vm_configuration."""
    chain = build(MiningChain, fork_at(FrontierVM, 0))
    assert issubclass(chain, MiningChain)
    assert len(chain.vm_configuration) == 1
    block_number, vm_class = chain.vm_configuration[0]
    assert block_number == 0
    assert vm_class == FrontierVM
def test_chain_builder_construct_chain_vm_configuration_multiple_forks():
    # Two fork_at() entries should appear in order in vm_configuration.
    chain = build(
        MiningChain,
        fork_at(FrontierVM, 0),
        fork_at(HomesteadVM, 5),
    )
    assert issubclass(chain, MiningChain)
    assert len(chain.vm_configuration) == 2
    expected = ((0, FrontierVM), (5, HomesteadVM))
    for actual, wanted in zip(chain.vm_configuration, expected):
        assert actual[0] == wanted[0]
        assert actual[1] == wanted[1]
@pytest.mark.parametrize(
    "fork_fn,vm_class",
    (
        (frontier_at, FrontierVM),
        (homestead_at, HomesteadVM),
        (tangerine_whistle_at, TangerineWhistleVM),
        (spurious_dragon_at, SpuriousDragonVM),
        (byzantium_at, ByzantiumVM),
        (constantinople_at, ConstantinopleVM),
        (petersburg_at, PetersburgVM),
        (istanbul_at, IstanbulVM),
        (muir_glacier_at, MuirGlacierVM),
        (berlin_at, BerlinVM),
        (london_at, LondonVM),
        (arrow_glacier_at, ArrowGlacierVM),
        (gray_glacier_at, GrayGlacierVM),
        (paris_at, ParisVM),
        (shanghai_at, ShanghaiVM),
        (
            latest_mainnet_at,
            ShanghaiVM,
        ),  # this will change whenever the next upgrade is locked
    ),
)
def test_chain_builder_construct_chain_fork_specific_helpers(fork_fn, vm_class):
    # Each fork-specific helper should append its VM at the given block
    # number, after any configuration already on the chain class.
    class DummyVM(FrontierVM):
        pass

    class ChainForTest(MiningChain):
        vm_configuration = ((0, DummyVM),)

    chain = build(
        ChainForTest,
        fork_fn(12),
    )
    assert issubclass(chain, MiningChain)
    assert len(chain.vm_configuration) == 2
    assert chain.vm_configuration[0][0] == 0
    assert chain.vm_configuration[0][1] is DummyVM
    assert chain.vm_configuration[1][0] == 12
    assert chain.vm_configuration[1][1] is vm_class
def test_chain_builder_enable_pow_mining():
    """A chain built with enable_pow_mining() mines valid PoW seals."""
    chain = build(
        MiningChain,
        frontier_at(0),
        enable_pow_mining(),
        genesis(),
    )
    mined = chain.mine_block()
    header = mined.header
    # check_pow raises ValidationError on an invalid seal.
    check_pow(
        mined.number,
        header.mining_hash,
        header.mix_hash,
        header.nonce,
        header.difficulty,
    )
def test_chain_builder_without_any_mining_config():
    # Without PoW mining enabled (or the check disabled), sealing fails.
    chain = build(MiningChain, frontier_at(0), genesis())
    with pytest.raises(ValidationError, match="mix hash mismatch"):
        chain.mine_block()
def test_chain_builder_disable_pow_check():
    """disable_pow_check() mines blocks whose seal would not validate."""
    chain = build(
        MiningChain,
        frontier_at(0),
        disable_pow_check(),
        genesis(),
    )
    mined = chain.mine_block()
    header = mined.header
    with pytest.raises(ValidationError, match="mix hash mismatch"):
        check_pow(
            mined.number,
            header.mining_hash,
            header.mix_hash,
            header.nonce,
            header.difficulty,
        )
def test_chain_builder_chain_id():
    # chain_id() stamps the configured id onto the chain class.
    chain = build(MiningChain, chain_id(1234))
    assert chain.chain_id == 1234
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetShareResult',
'AwaitableGetShareResult',
'get_share',
'get_share_output',
]
@pulumi.output_type
class GetShareResult:
    """
    A share data transfer object.
    """
    # Auto-generated output type: __init__ validates each resolved value
    # and stores it via pulumi.set; the properties below read them back.
    def __init__(__self__, created_at=None, description=None, id=None, name=None, provisioning_state=None, share_kind=None, system_data=None, terms=None, type=None, user_email=None, user_name=None):
        if created_at and not isinstance(created_at, str):
            raise TypeError("Expected argument 'created_at' to be a str")
        pulumi.set(__self__, "created_at", created_at)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if share_kind and not isinstance(share_kind, str):
            raise TypeError("Expected argument 'share_kind' to be a str")
        pulumi.set(__self__, "share_kind", share_kind)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if terms and not isinstance(terms, str):
            raise TypeError("Expected argument 'terms' to be a str")
        pulumi.set(__self__, "terms", terms)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if user_email and not isinstance(user_email, str):
            raise TypeError("Expected argument 'user_email' to be a str")
        pulumi.set(__self__, "user_email", user_email)
        if user_name and not isinstance(user_name, str):
            raise TypeError("Expected argument 'user_name' to be a str")
        pulumi.set(__self__, "user_name", user_name)

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        Time at which the share was created.
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Share description.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource id of the azure resource
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the azure resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Gets or sets the provisioning state
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="shareKind")
    def share_kind(self) -> Optional[str]:
        """
        Share kind.
        """
        return pulumi.get(self, "share_kind")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        System Data of the Azure resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def terms(self) -> Optional[str]:
        """
        Share terms.
        """
        return pulumi.get(self, "terms")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the azure resource
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="userEmail")
    def user_email(self) -> str:
        """
        Email of the user who created the resource
        """
        return pulumi.get(self, "user_email")

    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> str:
        """
        Name of the user who created the resource
        """
        return pulumi.get(self, "user_name")
class AwaitableGetShareResult(GetShareResult):
    # Awaitable wrapper so the result can be used with `await`; it
    # resolves immediately because the values are already known.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetShareResult(
            created_at=self.created_at,
            description=self.description,
            id=self.id,
            name=self.name,
            provisioning_state=self.provisioning_state,
            share_kind=self.share_kind,
            system_data=self.system_data,
            terms=self.terms,
            type=self.type,
            user_email=self.user_email,
            user_name=self.user_name)
def METHOD_NAME(account_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                share_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetShareResult:
    """
    Get a share
    Azure REST API version: 2021-08-01.


    :param str account_name: The name of the share account.
    :param str resource_group_name: The resource group name.
    :param str share_name: The name of the share to retrieve.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['shareName'] = share_name
    # Merge caller options with provider defaults before invoking.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke of the datashare getShare data source.
    __ret__ = pulumi.runtime.invoke('azure-native:datashare:getShare', __args__, opts=opts, typ=GetShareResult).value

    return AwaitableGetShareResult(
        created_at=pulumi.get(__ret__, 'created_at'),
        description=pulumi.get(__ret__, 'description'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        share_kind=pulumi.get(__ret__, 'share_kind'),
        system_data=pulumi.get(__ret__, 'system_data'),
        terms=pulumi.get(__ret__, 'terms'),
        type=pulumi.get(__ret__, 'type'),
        user_email=pulumi.get(__ret__, 'user_email'),
        user_name=pulumi.get(__ret__, 'user_name'))
# lift_output_func wraps the plain invoke so the same lookup accepts
# pulumi.Input values and returns a pulumi.Output; the body is elided.
@_utilities.lift_output_func(METHOD_NAME)
def get_share_output(account_name: Optional[pulumi.Input[str]] = None,
                     resource_group_name: Optional[pulumi.Input[str]] = None,
                     share_name: Optional[pulumi.Input[str]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetShareResult]:
    """
    Get a share
    Azure REST API version: 2021-08-01.


    :param str account_name: The name of the share account.
    :param str resource_group_name: The resource group name.
    :param str share_name: The name of the share to retrieve.
    """
    ...
from copy import deepcopy
import pytest
from nbformat.v4.nbbase import new_code_cell, new_notebook
from jupytext import reads, writes
from jupytext.cli import jupytext as jupytext_cli
from jupytext.compare import compare, compare_notebooks
from jupytext.metadata_filter import filter_metadata, metadata_filter_as_dict
from .utils import requires_myst
def to_dict(keys):
    """Map every key to None, mimicking metadata entries with no value."""
    return dict.fromkeys(keys)
@pytest.mark.parametrize(
    "metadata_filter_string,metadata_filter_dict",
    [
        (
            "all, -widgets,-varInspector",
            {"additional": "all", "excluded": ["widgets", "varInspector"]},
        ),
        ("toc", {"additional": ["toc"]}),
        ("+ toc", {"additional": ["toc"]}),
        ("preserve,-all", {"additional": ["preserve"], "excluded": "all"}),
        (
            "ExecuteTime, autoscroll, -hide_output",
            {"additional": ["ExecuteTime", "autoscroll"], "excluded": ["hide_output"]},
        ),
    ],
)
def test_string_to_dict_conversion(metadata_filter_string, metadata_filter_dict):
    # Each filter string should normalize to its dict representation.
    assert metadata_filter_as_dict(metadata_filter_string) == metadata_filter_dict
def test_metadata_filter_as_dict():
    """Boolean and dict filters normalize like their string equivalents."""
    assert metadata_filter_as_dict(True) == metadata_filter_as_dict("all")
    assert metadata_filter_as_dict(False) == metadata_filter_as_dict("-all")
    excluded_all = metadata_filter_as_dict({"excluded": "all"})
    assert excluded_all == metadata_filter_as_dict("-all")
def METHOD_NAME():
    # With no user filter, the default filter alone drives the selection.
    keys = ["technical", "user", "preserve"]
    assert filter_metadata(to_dict(keys), None, "-technical") == to_dict(
        ["user", "preserve"]
    )
    assert filter_metadata(to_dict(keys), None, "preserve,-all") == to_dict(
        ["preserve"]
    )
def test_metadata_filter_user_plus_default():
    # User filters combine with (and extend) the default filter.
    keys = ["technical", "user", "preserve"]
    assert filter_metadata(to_dict(keys), "-user", "-technical") == to_dict(
        ["preserve"]
    )
    assert filter_metadata(to_dict(keys), "all,-user", "-technical") == to_dict(
        ["preserve", "technical"]
    )
    assert filter_metadata(to_dict(keys), "user", "preserve,-all") == to_dict(
        ["user", "preserve"]
    )
def test_metadata_filter_user_overrides_default():
    # An explicit user rule wins over the default rule for the same key.
    keys = ["technical", "user", "preserve"]
    assert filter_metadata(to_dict(keys), "all,-user", "-technical") == to_dict(
        ["technical", "preserve"]
    )
    assert filter_metadata(to_dict(keys), "user,-all", "preserve") == to_dict(
        ["user"]
    )
def test_negative_cell_metadata_filter():
    # A negative user filter adds to the default exclusions.
    assert filter_metadata(to_dict(["exectime"]), "-linesto", "-exectime") == {}
def test_cell_metadata_filter_is_updated():
    # A cell option not covered by the notebook's cell_metadata_filter
    # should be appended to the filter on read, and survive a round trip.
    text = """---
jupyter:
  jupytext:
    cell_metadata_filter: -all
---

```{r cache=FALSE}
1+1
```
"""
    nb = reads(text, "Rmd")
    assert nb.metadata["jupytext"]["cell_metadata_filter"] == "cache,-all"

    text2 = writes(nb, "Rmd")
    # The code chunk (last three lines) must be rendered unchanged.
    assert text.splitlines()[-3:] == text2.splitlines()[-3:]
def test_notebook_metadata_all():
    # With filter "all", custom notebook metadata survives serialization.
    metadata = {
        "user_metadata": [1, 2, 3],
        "jupytext": {"notebook_metadata_filter": "all"},
    }
    text = writes(new_notebook(metadata=metadata), "md")
    assert "user_metadata" in text
def test_notebook_metadata_none():
    # Excluding all metadata suppresses the YAML header entirely.
    nb = new_notebook(metadata={"jupytext": {"notebook_metadata_filter": "-all"}})
    assert "---" not in writes(nb, "md")
def test_filter_nested_metadata():
    """Dotted filter entries select or drop nested metadata keys."""
    metadata = {"I": {"1": {"a": 1, "b": 2}}}
    full = {"I": {"1": {"a": 1, "b": 2}}}
    only_a = {"I": {"1": {"a": 1}}}

    assert filter_metadata(metadata, "I", "-all") == full
    assert filter_metadata(metadata, "-I") == {}
    assert filter_metadata(metadata, "I.1.a", "-all") == only_a
    assert filter_metadata(metadata, "-I.1.b") == only_a
    assert filter_metadata(metadata, "-I.1.b", "I") == only_a
    # That one is not supported yet
    # assert filter_metadata(metadata, 'I.1.a', '-I') == {'I': {'1': {'a': 1}}}
def test_filter_out_execution_metadata():
    # Execution timestamps are transient and must not be serialized.
    execution_times = {
        "iopub.execute_input": "2020-10-12T19:13:45.306603Z",
        "iopub.status.busy": "2020-10-12T19:13:45.306233Z",
        "iopub.status.idle": "2020-10-12T19:13:45.316103Z",
        "shell.execute_reply": "2020-10-12T19:13:45.315429Z",
        "shell.execute_reply.started": "2020-10-12T19:13:45.306577Z",
    }
    cell = new_code_cell("1 + 1", metadata={"execution": execution_times})
    text = writes(new_notebook(cells=[cell]), fmt="py:percent")
    assert "execution" not in text
def test_default_config_has_priority_over_current_metadata(
    tmpdir,
    text="""# %% some_metadata_key=5
1 + 1
""",
):
    # A filter set in jupytext.toml must win over metadata already
    # present in the script itself.
    py_file = tmpdir.join("notebook.py")
    py_file.write(text)

    cfg_file = tmpdir.join("jupytext.toml")
    cfg_file.write(
        """cell_metadata_filter = "-some_metadata_key"
"""
    )

    jupytext_cli([str(py_file), "--to", "py"])

    # The excluded key is gone after the round trip.
    assert (
        py_file.read()
        == """# %%
1 + 1
"""
    )
@requires_myst
def test_metadata_filter_in_notebook_757():
    # Round-tripping must preserve a notebook-level metadata filter and
    # apply it consistently across text formats (issue #757).
    md = """---
jupytext:
  cell_metadata_filter: all,-hidden,-heading_collapsed
  notebook_metadata_filter: all,-language_info,-toc,-jupytext.text_representation.jupytext_version,-jupytext.text_representation.format_version
  text_representation:
    extension: .md
    format_name: myst
kernelspec:
  display_name: Python 3
  language: python
  name: python3
nbhosting:
  title: 'Exercice: Taylor'
---

```python
1 + 1
```
"""  # noqa
    nb = reads(md, fmt="md:myst")
    assert nb.metadata["jupytext"]["notebook_metadata_filter"] == ",".join(
        [
            "all",
            "-language_info",
            "-toc",
            "-jupytext.text_representation.jupytext_version",
            "-jupytext.text_representation.format_version",
        ]
    )
    md2 = writes(nb, fmt="md:myst")
    compare(md2, md)

    # The same notebook must round trip through the other text formats,
    # modulo the format-specific text_representation metadata.
    for fmt in ["py:light", "py:percent", "md"]:
        text = writes(nb, fmt=fmt)
        nb2 = reads(text, fmt=fmt)
        compare_notebooks(nb2, nb, fmt=fmt)

        ref_metadata = deepcopy(nb.metadata)
        del ref_metadata["jupytext"]["text_representation"]
        del nb2.metadata["jupytext"]["text_representation"]
        compare(nb2.metadata, ref_metadata)
import pytest
from napari._qt.widgets.qt_plugin_sorter import QtPluginSorter, rst2html
# Maps an RST docstring (here: the get_reader hook spec) to the HTML shown
# in the plugin-sorter tooltips; the empty string must map to itself.
@pytest.mark.parametrize(
    'text,expected_text',
    [
        ("", ""),
        (
            """Return a function capable of loading ``path`` into napari, or ``None``.
This is the primary "**reader plugin**" function. It accepts a path or
list of paths, and returns a list of data to be added to the ``Viewer``.
The function may return ``[(None, )]`` to indicate that the file was read
successfully, but did not contain any data.
The main place this hook is used is in :func:`Viewer.open()
<napari.components.viewer_model.ViewerModel.open>`, via the
:func:`~napari.plugins.io.read_data_with_plugins` function.
It will also be called on ``File -> Open...`` or when a user drops a file
or folder onto the viewer. This function must execute **quickly**, and
should return ``None`` if the filepath is of an unrecognized format for
this reader plugin. If ``path`` is determined to be recognized format,
this function should return a *new* function that accepts the same filepath
(or list of paths), and returns a list of ``LayerData`` tuples, where each
tuple is a 1-, 2-, or 3-tuple of ``(data,)``, ``(data, meta)``, or ``(data,
meta, layer_type)``.
``napari`` will then use each tuple in the returned list to generate a new
layer in the viewer using the :func:`Viewer._add_layer_from_data()
<napari.components.viewer_model.ViewerModel._add_layer_from_data>`
method. The first, (optional) second, and (optional) third items in each
tuple in the returned layer_data list, therefore correspond to the
``data``, ``meta``, and ``layer_type`` arguments of the
:func:`Viewer._add_layer_from_data()
<napari.components.viewer_model.ViewerModel._add_layer_from_data>`
method, respectively.
.. important::
``path`` may be either a ``str`` or a ``list`` of ``str``. If a
``list``, then each path in the list can be assumed to be one part of a
larger multi-dimensional stack (for instance: a list of 2D image files
that should be stacked along a third axis). Implementations should do
their own checking for ``list`` or ``str``, and handle each case as
desired.""",
            'Return a function capable of loading <code>path</code> into napari, or <code>None</code>.<br><br> '
            'This is the primary "<strong>reader plugin</strong>" function. It accepts a path or<br> '
            'list of paths, and returns a list of data to be added to the <code>Viewer</code>.<br> '
            'The function may return <code>[(None, )]</code> to indicate that the file was read<br> '
            'successfully, but did not contain any data.<br><br> '
            'The main place this hook is used is in <code>Viewer.open()</code>, via the<br> '
            '<code>read_data_with_plugins</code> function.<br><br> '
            'It will also be called on <code>File -> Open...</code> or when a user drops a file<br> '
            'or folder onto the viewer. This function must execute <strong>quickly</strong>, and<br> '
            'should return <code>None</code> if the filepath is of an unrecognized format for<br> '
            'this reader plugin. If <code>path</code> is determined to be recognized format,<br> '
            'this function should return a <em>new</em> function that accepts the same filepath<br> '
            '(or list of paths), and returns a list of <code>LayerData</code> tuples, where each<br> '
            'tuple is a 1-, 2-, or 3-tuple of <code>(data,)</code>, <code>(data, meta)</code>, or <code>(data,<br> '
            'meta, layer_type)</code>.<br><br> <code>napari</code> will then use each tuple in the returned list to generate a new<br> '
            'layer in the viewer using the <code>Viewer._add_layer_from_data()</code><br> '
            'method. The first, (optional) second, and (optional) third items in each<br> '
            'tuple in the returned layer_data list, therefore correspond to the<br> '
            '<code>data</code>, <code>meta</code>, and <code>layer_type</code> arguments of the<br> '
            '<code>Viewer._add_layer_from_data()</code><br> method, respectively.<br><br> .. important::<br><br>'
            ' <code>path</code> may be either a <code>str</code> or a <code>list</code> of <code>str</code>. If a<br>'
            ' <code>list</code>, then each path in the list can be assumed to be one part of a<br> '
            'larger multi-dimensional stack (for instance: a list of 2D image files<br> '
            'that should be stacked along a third axis). Implementations should do<br> '
            'their own checking for <code>list</code> or <code>str</code>, and handle each case as<br> '
            'desired.',
        ),
    ],
)
def METHOD_NAME(text, expected_text):
    # rst2html converts the RST markup used in hook docstrings to HTML.
    assert rst2html(text) == expected_text
def test_create_qt_plugin_sorter(qtbot):
    """The sorter's hook combobox lists the placeholder plus every hook name."""
    sorter = QtPluginSorter()
    qtbot.addWidget(sorter)
    # Collect every entry currently offered by the hook-selection combobox.
    combo = sorter.hook_combo_box
    items = [combo.itemText(i) for i in range(combo.count())]
    expected = [
        'select hook... ',
        'get_reader',
        'get_writer',
        'write_image',
        'write_labels',
        'write_points',
        'write_shapes',
        'write_surface',
        'write_vectors',
    ]
    assert items == expected
# Each case pairs a hook name from the combobox with a fragment of the rendered
# HTML documentation expected to appear in the info widget's tooltip.
@pytest.mark.parametrize(
    "hook_name,help_info",
    [
        ('select hook... ', ''),
        (
            'get_reader',
            'This is the primary "<strong>reader plugin</strong>" function. It accepts a path or<br> list of paths, and returns a list of data to be added to the <code>Viewer</code>.<br>',
        ),
        (
            'get_writer',
            'This function will be called whenever the user attempts to save multiple<br> layers (e.g. via <code>File -> Save Layers</code>, or<br> <code>save_layers</code>).<br>',
        ),
        (
            'write_labels',
            'It is the responsibility of the implementation to check any extension on<br> <code>path</code> and return <code>None</code> if it is an unsupported extension.',
        ),
        (
            'write_points',
            'It is the responsibility of the implementation to check any extension on<br> <code>path</code> and return <code>None</code> if it is an unsupported extension.',
        ),
        (
            'write_shapes',
            'It is the responsibility of the implementation to check any extension on<br> <code>path</code> and return <code>None</code> if it is an unsupported extension.',
        ),
        (
            'write_surface',
            'It is the responsibility of the implementation to check any extension on<br> <code>path</code> and return <code>None</code> if it is an unsupported extension.',
        ),
        (
            'write_vectors',
            'It is the responsibility of the implementation to check any extension on<br> <code>path</code> and return <code>None</code> if it is an unsupported extension.',
        ),
    ],
)
def test_qt_plugin_sorter_help_info(qtbot, hook_name, help_info):
    """Selecting a hook in the combobox shows its docs in the info tooltip."""
    plugin_sorter = QtPluginSorter()
    qtbot.addWidget(plugin_sorter)
    # Check hook combobox items help tooltip in the info widget
    info_widget = plugin_sorter.info
    hook_combo_box = plugin_sorter.hook_combo_box
    hook_combo_box.setCurrentText(hook_name)
    assert help_info in info_widget.toolTip()
298,625 | literal convert | #########################################################################
#
# Copyright (C) 2021 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import ast
import typing
import logging
from datetime import datetime
from inspect import signature, Signature, Parameter
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from geonode.celery_app import app
from geonode.base.models import ResourceBase
from geonode.resource.manager import resource_manager
from geonode.tasks.tasks import AcquireLock, FaultTolerantTask
from .utils import resolve_type_serializer
from ..models import ExecutionRequest
logger = logging.getLogger(__name__)
def _get_param_value(_param, _input_value):
    """Coerce a raw request input value to the type declared by *_param*.

    The `ExecutionRequest.input_params` payload stores values as plain
    (usually string) JSON data; this helper inspects the annotation of the
    target `resource_manager` method parameter and converts accordingly:

    - ``typing.Union[object, None]``: resolved through the type serializer
      registry (generic "resource type" parameter).
    - ``ResourceBase``: looked up by primary key.
    - ``settings.AUTH_USER_MODEL``: looked up by username.
    - ``dict`` / ``list`` / ``tuple`` given as a string: parsed with
      ``ast.literal_eval``, then converted to the annotated container.
    - ``typing.List`` / ``typing.Dict`` / ``typing.Tuple`` generics: parsed
      (if a string) and converted to the matching builtin container.
    - anything else: the annotation is called as a constructor; on
      ``TypeError`` the raw input value is returned unchanged.

    Returns the converted value, or the raw input when no conversion applies.
    """
    _param_value = None
    if _param.annotation == typing.Union[object, None]:
        _param_value = resolve_type_serializer(_input_value)[0]
    elif _param.annotation == ResourceBase:
        _param_value = ResourceBase.objects.get(id=_input_value)
    elif _param.annotation == settings.AUTH_USER_MODEL:
        _param_value = get_user_model().objects.get(username=_input_value)
    elif _param.annotation in (dict, list, tuple) and isinstance(_input_value, str):
        _param_value = _param.annotation(ast.literal_eval(_input_value))
        # Replace serialized usernames with real user objects.  Only dicts can
        # carry the "user"/"owner" keys: previously this pop() ran for lists
        # (TypeError: pop with a str index) and tuples (no pop() at all) too.
        if isinstance(_param_value, dict):
            for _key in ["user", "owner"]:
                _username = _param_value.pop(_key, None)
                if _username:
                    _param_value[_key] = get_user_model().objects.get(username=_username)
    else:
        try:
            def METHOD_NAME(_v):
                # Strings may encode Python literals; anything else passes through.
                return ast.literal_eval(_v) if isinstance(_v, str) else _v
            _value = _input_value
            if "typing.List" in str(_param.annotation):
                _param_value = list(METHOD_NAME(_value))
            elif "typing.Dict" in str(_param.annotation):
                _param_value = dict(METHOD_NAME(_value))
            elif "typing.Tuple" in str(_param.annotation):
                _param_value = tuple(METHOD_NAME(_value))
            else:
                _param_value = _param.annotation(_input_value)
        except TypeError:
            # Annotation is not callable on this value; keep the raw input.
            _param_value = _input_value
    return _param_value
@app.task(
    bind=True,
    base=FaultTolerantTask,
    queue="geonode",
    expires=30,
    time_limit=600,
    acks_late=False,
    ignore_result=False,
)
def resouce_service_dispatcher(self, execution_id: str):
    """Performs a Resource Service request asynchronously.
    This is the main Resource Service API dispatcher.
    The method looks for avaialable `ExecutionRequests` with status `READY` and triggers the
    `func_name` method of the `resource_manager` with the `input_params`.
    It finally updates the `status` of the request.
    A client is able to query the `status_url` endpoint in order to get the current `status` other than
    the `output_params`.

    Flow: acquire a per-execution lock, load the request, mark it RUNNING,
    bind the converted input params to the target manager method's signature,
    invoke it, then record FINISHED/FAILED plus any output params.
    """
    # The lock guarantees at most one worker processes a given execution id.
    with AcquireLock(execution_id) as lock:
        if lock.acquire() is True:
            try:
                _exec_request = ExecutionRequest.objects.filter(exec_id=execution_id)
                if _exec_request.exists():
                    _request = _exec_request.get()
                    # Only requests still in READY are processed; anything else
                    # (already running/finished) is silently skipped.
                    if _request.status == ExecutionRequest.STATUS_READY:
                        _exec_request.update(status=ExecutionRequest.STATUS_RUNNING)
                        _request.refresh_from_db()
                        if hasattr(resource_manager, _request.func_name):
                            try:
                                # Convert each provided input param to the type
                                # declared on the manager method's signature.
                                _signature = signature(getattr(resource_manager, _request.func_name))
                                _args = []
                                _kwargs = {}
                                for _param_name in _signature.parameters:
                                    if _request.input_params and _request.input_params.get(_param_name, None):
                                        _param = _signature.parameters.get(_param_name)
                                        _param_value = _get_param_value(_param, _request.input_params.get(_param_name))
                                        if _param.kind == Parameter.POSITIONAL_ONLY:
                                            _args.append(_param_value)
                                        else:
                                            _kwargs[_param_name] = _param_value
                                # bind() validates the call against the signature
                                # before the actual invocation.
                                _bindings = _signature.bind(*_args, **_kwargs)
                                _bindings.apply_defaults()
                                _output = getattr(resource_manager, _request.func_name)(
                                    *_bindings.args, **_bindings.kwargs
                                )
                                # Serialize the return value for the client:
                                # builtins are stored as-is, resources by uuid.
                                # NOTE(review): a non-builtin, non-ResourceBase
                                # return annotation leaves _output_params empty
                                # ({}), not {"output": None} — confirm intended.
                                _output_params = {}
                                if _output is not None and _signature.return_annotation != Signature.empty:
                                    if _signature.return_annotation.__module__ == "builtins":
                                        _output_params = {"output": _output}
                                    elif _signature.return_annotation == ResourceBase or isinstance(
                                        _output, ResourceBase
                                    ):
                                        _output_params = {"output": {"uuid": _output.uuid}}
                                else:
                                    _output_params = {"output": None}
                                _exec_request.update(
                                    status=ExecutionRequest.STATUS_FINISHED,
                                    finished=datetime.now(),
                                    output_params=_output_params,
                                )
                                _request.refresh_from_db()
                            except Exception as e:
                                # Any failure in the manager call marks the
                                # request FAILED and stores the exception text.
                                logger.exception(e)
                                _exec_request.update(
                                    status=ExecutionRequest.STATUS_FAILED,
                                    finished=datetime.now(),
                                    output_params={
                                        "error": _(
                                            f"Error occurred while executin the operation: '{_request.func_name}'"
                                        ),
                                        "exception": str(e),
                                    },
                                )
                                _request.refresh_from_db()
                        else:
                            logger.warning(_(f"Could not find the operation name: '{_request.func_name}'"))
                            _request.refresh_from_db()
            finally:
                lock.release()
    # NOTE(review): this debug line runs on every invocation, even when the
    # request WAS found and processed — the "was not found" wording is
    # misleading; consider guarding it on the lookup result.
    logger.debug(f"WARNING: The requested ExecutionRequest with 'exec_id'={execution_id} was not found!")
298,626 | negative answer | # SPDX-License-Identifier: GPL-3.0
# Copyright (c) 2014-2023 William Edwards <shadowapex@gmail.com>, Benjamin Bean <superman2k5@gmail.com>
from __future__ import annotations
import logging
import os
from base64 import b64decode
from typing import Optional
import pygame
from pygame import Rect
from tuxemon import prepare, save, tools
from tuxemon.locale import T
from tuxemon.menu.interface import MenuItem
from tuxemon.menu.menu import PopUpMenu
from tuxemon.save import get_save_path
from tuxemon.session import local_session
from tuxemon.tools import open_dialog
from tuxemon.ui import text
logger = logging.getLogger(__name__)
cfgcheck = prepare.CONFIG
class SaveMenuState(PopUpMenu[None]):
    """Pop-up menu offering a fixed number of save slots.

    Each slot renders a thumbnail/summary of an existing save file, or an
    "empty slot" placeholder when no save exists for that slot.  Selecting
    an occupied slot asks for confirmation before overwriting it.
    """

    # Number of save slots presented to the player.
    number_of_slots = 3
    shrink_to_items = True

    def __init__(self, selected_index: Optional[int] = None) -> None:
        # Default to the slot used for the last save/load in this session.
        if selected_index is None:
            selected_index = save.slot_number or 0
        super().__init__(selected_index=selected_index)

    def initialize_items(self) -> None:
        """Build one menu item per slot, rendered from the save file if present."""
        empty_image = None
        rect = self.client.screen.get_rect()
        # Each slot occupies 80% of the screen width and a sixth of its height.
        slot_rect = Rect(0, 0, rect.width * 0.80, rect.height // 6)
        for i in range(self.number_of_slots):
            # Check to see if a save exists for the current slot
            save_path = get_save_path(i + 1)
            if os.path.exists(save_path):
                image = self.render_slot(slot_rect, i + 1)
                item = MenuItem(image, T.translate("menu_save"), None, None)
                self.add(item)
            else:
                # Render the placeholder once and reuse it for every empty slot.
                if not empty_image:
                    empty_image = self.render_empty_slot(slot_rect)
                item = MenuItem(empty_image, "SAVE", None, None)
                self.add(item)

    def render_empty_slot(
        self,
        rect: pygame.rect.Rect,
    ) -> pygame.surface.Surface:
        """Return a surface showing the translated "empty slot" label."""
        slot_image = pygame.Surface(rect.size, pygame.SRCALPHA)
        rect = rect.move(0, rect.height // 2 - 10)
        text.draw_text(
            slot_image,
            T.translate("empty_slot"),
            rect,
            font=self.font,
        )
        return slot_image

    def render_slot(
        self,
        rect: pygame.rect.Rect,
        slot_num: int,
    ) -> pygame.surface.Surface:
        """Return a surface summarizing the save in *slot_num*.

        Shows the saved screenshot (or a white box, crossed in red when the
        save data carries an "error" marker), the slot number, the player
        name, and — for healthy saves — the save timestamp.
        """
        slot_image = pygame.Surface(rect.size, pygame.SRCALPHA)
        # Try and load the save game and draw details about the save
        save_data = save.load(slot_num)
        assert save_data
        if "screenshot" in save_data:
            # Screenshot is stored base64-encoded with explicit dimensions.
            screenshot = b64decode(save_data["screenshot"])
            size = (
                save_data["screenshot_width"],
                save_data["screenshot_height"],
            )
            thumb_image = pygame.image.frombuffer(
                screenshot,
                size,
                "RGB",
            ).convert()
            thumb_rect = thumb_image.get_rect().fit(rect)
            thumb_image = pygame.transform.smoothscale(
                thumb_image,
                thumb_rect.size,
            )
        else:
            # No screenshot available: draw a plain white placeholder.
            thumb_rect = rect.copy()
            thumb_rect.width //= 5
            thumb_image = pygame.Surface(thumb_rect.size)
            thumb_image.fill((255, 255, 255))
        if "error" in save_data:
            # Cross out the thumbnail to flag a corrupted/unreadable save.
            red = (255, 0, 0)
            pygame.draw.line(thumb_image, red, [0, 0], thumb_rect.size, 3)
            pygame.draw.line(
                thumb_image,
                red,
                [0, thumb_rect.height],
                [thumb_rect.width, 0],
                3,
            )
        # Draw the screenshot
        slot_image.blit(thumb_image, (rect.width * 0.20, 0))
        # Draw the slot text
        rect = rect.move(0, rect.height // 2 - 10)
        text.draw_text(
            slot_image,
            T.translate("slot") + " " + str(slot_num),
            rect,
            font=self.font,
        )
        x = int(rect.width * 0.5)
        text.draw_text(
            slot_image,
            save_data["player_name"],
            (x, 0, 500, 500),
            font=self.font,
        )
        if "error" not in save_data:
            text.draw_text(
                slot_image,
                save_data["time"],
                (x, 50, 500, 500),
                font=self.font,
            )
        return slot_image

    def save(self) -> None:
        """Write the current session to the selected slot and notify the player."""
        logger.info("Saving!")
        try:
            save_data = save.get_save_data(
                local_session,
            )
            save.save(
                save_data,
                self.selected_index + 1,
            )
            save.slot_number = self.selected_index
        except Exception as e:
            # BUG FIX: this handler previously re-raised immediately, leaving
            # the logging and failure dialog below as unreachable dead code.
            # Handle the failure explicitly so the player is informed, which
            # mirrors the success dialog in the else branch.
            logger.error("Unable to save game!!")
            logger.error(e)
            open_dialog(local_session, [T.translate("save_failure")])
        else:
            open_dialog(local_session, [T.translate("save_success")])

    def on_menu_selection(self, menuitem: MenuItem[None]) -> None:
        """Save into the selected slot, asking for confirmation on overwrite."""

        def positive_answer() -> None:
            self.client.pop_state()  # close confirmation menu
            self.client.pop_state()  # close save menu
            self.save()

        def METHOD_NAME() -> None:
            self.client.pop_state()  # close confirmation menu

        def ask_confirmation() -> None:
            # open menu to confirm the save
            tools.open_choice_dialog(
                local_session,
                menu=(
                    (
                        "overwrite",
                        T.translate("save_overwrite"),
                        positive_answer,
                    ),
                    (
                        "keep",
                        T.translate("save_keep"),
                        METHOD_NAME,
                    ),
                ),
                escape_key_exits=True,
            )

        # Only prompt when the target slot already holds a save.
        save_data = save.load(self.selected_index + 1)
        if save_data:
            ask_confirmation()
        else:
            self.client.pop_state()  # close save menu
            self.save()
298,627 | register optionflag | import types
import unittest
from _typeshed import ExcInfo
from collections.abc import Callable
from typing import Any, NamedTuple
from typing_extensions import TypeAlias
__all__ = [
"register_optionflag",
"DONT_ACCEPT_TRUE_FOR_1",
"DONT_ACCEPT_BLANKLINE",
"NORMALIZE_WHITESPACE",
"ELLIPSIS",
"SKIP",
"IGNORE_EXCEPTION_DETAIL",
"COMPARISON_FLAGS",
"REPORT_UDIFF",
"REPORT_CDIFF",
"REPORT_NDIFF",
"REPORT_ONLY_FIRST_FAILURE",
"REPORTING_FLAGS",
"FAIL_FAST",
"Example",
"DocTest",
"DocTestParser",
"DocTestFinder",
"DocTestRunner",
"OutputChecker",
"DocTestFailure",
"UnexpectedException",
"DebugRunner",
"testmod",
"testfile",
"run_docstring_examples",
"DocTestSuite",
"DocFileSuite",
"set_unittest_reportflags",
"script_from_examples",
"testsource",
"debug_src",
"debug",
]
class TestResults(NamedTuple):
    # (failed, attempted) counts returned by testmod/testfile/run().
    failed: int
    attempted: int

# Registry mapping option-flag names to their bit values.
OPTIONFLAGS_BY_NAME: dict[str, int]

def METHOD_NAME(name: str) -> int: ...  # registers and returns a new option-flag bit

# Comparison option flags (affect how expected vs. actual output is matched).
DONT_ACCEPT_TRUE_FOR_1: int
DONT_ACCEPT_BLANKLINE: int
NORMALIZE_WHITESPACE: int
ELLIPSIS: int
SKIP: int
IGNORE_EXCEPTION_DETAIL: int
COMPARISON_FLAGS: int
# Reporting option flags (affect how failures are displayed).
REPORT_UDIFF: int
REPORT_CDIFF: int
REPORT_NDIFF: int
REPORT_ONLY_FIRST_FAILURE: int
FAIL_FAST: int
REPORTING_FLAGS: int
# Special markers used in expected output.
BLANKLINE_MARKER: str
ELLIPSIS_MARKER: str

class Example:
    # A single interactive example: source statement plus expected output.
    source: str
    want: str
    exc_msg: str | None
    lineno: int
    indent: int
    options: dict[int, bool]
    def __init__(
        self,
        source: str,
        want: str,
        exc_msg: str | None = None,
        lineno: int = 0,
        indent: int = 0,
        options: dict[int, bool] | None = None,
    ) -> None: ...
    def __hash__(self) -> int: ...
    def __eq__(self, other: object) -> bool: ...

class DocTest:
    # A collection of Examples extracted from one docstring.
    examples: list[Example]
    globs: dict[str, Any]
    name: str
    filename: str | None
    lineno: int | None
    docstring: str | None
    def __init__(
        self,
        examples: list[Example],
        globs: dict[str, Any],
        name: str,
        filename: str | None,
        lineno: int | None,
        docstring: str | None,
    ) -> None: ...
    def __hash__(self) -> int: ...
    def __lt__(self, other: DocTest) -> bool: ...
    def __eq__(self, other: object) -> bool: ...

class DocTestParser:
    # Extracts Examples/DocTests from docstring text.
    def parse(self, string: str, name: str = "<string>") -> list[str | Example]: ...
    def get_doctest(self, string: str, globs: dict[str, Any], name: str, filename: str | None, lineno: int | None) -> DocTest: ...
    def get_examples(self, string: str, name: str = "<string>") -> list[Example]: ...

class DocTestFinder:
    # Locates DocTests in modules, classes, and functions.
    def __init__(
        self, verbose: bool = False, parser: DocTestParser = ..., recurse: bool = True, exclude_empty: bool = True
    ) -> None: ...
    def find(
        self,
        obj: object,
        name: str | None = None,
        module: None | bool | types.ModuleType = None,
        globs: dict[str, Any] | None = None,
        extraglobs: dict[str, Any] | None = None,
    ) -> list[DocTest]: ...

# Output sink used by the runner's report_* hooks.
_Out: TypeAlias = Callable[[str], object]
class DocTestRunner:
    # Executes DocTests and reports/accumulates results.
    DIVIDER: str
    optionflags: int
    original_optionflags: int
    tries: int
    failures: int
    test: DocTest
    def __init__(self, checker: OutputChecker | None = None, verbose: bool | None = None, optionflags: int = 0) -> None: ...
    def report_start(self, out: _Out, test: DocTest, example: Example) -> None: ...
    def report_success(self, out: _Out, test: DocTest, example: Example, got: str) -> None: ...
    def report_failure(self, out: _Out, test: DocTest, example: Example, got: str) -> None: ...
    def report_unexpected_exception(self, out: _Out, test: DocTest, example: Example, exc_info: ExcInfo) -> None: ...
    def run(
        self, test: DocTest, compileflags: int | None = None, out: _Out | None = None, clear_globs: bool = True
    ) -> TestResults: ...
    def summarize(self, verbose: bool | None = None) -> TestResults: ...
    def merge(self, other: DocTestRunner) -> None: ...

class OutputChecker:
    # Compares expected vs. actual output under the given option flags.
    def check_output(self, want: str, got: str, optionflags: int) -> bool: ...
    def output_difference(self, example: Example, got: str, optionflags: int) -> str: ...

class DocTestFailure(Exception):
    # Raised by DebugRunner when an example's output doesn't match.
    test: DocTest
    example: Example
    got: str
    def __init__(self, test: DocTest, example: Example, got: str) -> None: ...

class UnexpectedException(Exception):
    # Raised by DebugRunner when an example raises an unexpected exception.
    test: DocTest
    example: Example
    exc_info: ExcInfo
    def __init__(self, test: DocTest, example: Example, exc_info: ExcInfo) -> None: ...

class DebugRunner(DocTestRunner): ...

# Module-level runner whose results are merged across testmod() calls.
master: DocTestRunner | None

def testmod(
    m: types.ModuleType | None = None,
    name: str | None = None,
    globs: dict[str, Any] | None = None,
    verbose: bool | None = None,
    report: bool = True,
    optionflags: int = 0,
    extraglobs: dict[str, Any] | None = None,
    raise_on_error: bool = False,
    exclude_empty: bool = False,
) -> TestResults: ...
def testfile(
    filename: str,
    module_relative: bool = True,
    name: str | None = None,
    package: None | str | types.ModuleType = None,
    globs: dict[str, Any] | None = None,
    verbose: bool | None = None,
    report: bool = True,
    optionflags: int = 0,
    extraglobs: dict[str, Any] | None = None,
    raise_on_error: bool = False,
    parser: DocTestParser = ...,
    encoding: str | None = None,
) -> TestResults: ...
def run_docstring_examples(
    f: object,
    globs: dict[str, Any],
    verbose: bool = False,
    name: str = "NoName",
    compileflags: int | None = None,
    optionflags: int = 0,
) -> None: ...
def set_unittest_reportflags(flags: int) -> int: ...
class DocTestCase(unittest.TestCase):
    # unittest adapter wrapping a single DocTest.
    def __init__(
        self,
        test: DocTest,
        optionflags: int = 0,
        setUp: Callable[[DocTest], Any] | None = None,
        tearDown: Callable[[DocTest], Any] | None = None,
        checker: OutputChecker | None = None,
    ) -> None: ...
    def runTest(self) -> None: ...
    def format_failure(self, err: str) -> str: ...
    def __hash__(self) -> int: ...
    def __eq__(self, other: object) -> bool: ...

class SkipDocTestCase(DocTestCase):
    # Placeholder case emitted when a module has no doctests to run.
    def __init__(self, module: types.ModuleType) -> None: ...
    def test_skip(self) -> None: ...

class _DocTestSuite(unittest.TestSuite): ...

def DocTestSuite(
    module: None | str | types.ModuleType = None,
    globs: dict[str, Any] | None = None,
    extraglobs: dict[str, Any] | None = None,
    test_finder: DocTestFinder | None = None,
    **options: Any,
) -> _DocTestSuite: ...

class DocFileCase(DocTestCase): ...

def DocFileTest(
    path: str,
    module_relative: bool = True,
    package: None | str | types.ModuleType = None,
    globs: dict[str, Any] | None = None,
    parser: DocTestParser = ...,
    encoding: str | None = None,
    **options: Any,
) -> DocFileCase: ...
def DocFileSuite(*paths: str, **kw: Any) -> _DocTestSuite: ...
# Debugging helpers: convert doctests to scripts and run them under pdb.
def script_from_examples(s: str) -> str: ...
def testsource(module: None | str | types.ModuleType, name: str) -> str: ...
def debug_src(src: str, pm: bool = False, globs: dict[str, Any] | None = None) -> None: ...
def debug_script(src: str, pm: bool = False, globs: dict[str, Any] | None = None) -> None: ...
def debug(module: None | str | types.ModuleType, name: str, pm: bool = False) -> None: ...
298,628 | no zero | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Connected Components."""
import tensorflow as tf
from tensorflow_addons.utils import types
from tensorflow_addons.utils.resource_loader import LazySO
from typing import Optional, Text
_image_so = LazySO("custom_ops/image/_image_ops.so")
@tf.function
def connected_components(
    images: types.TensorLike, name: Optional[Text] = None
) -> tf.Tensor:
    """Labels the connected components in a batch of images.
    A component is a set of pixels in a single input image, which are
    all adjacent and all have the same non-zero value. The components
    using a squared connectivity of one (all equal entries are joined with
    their neighbors above,below, left, and right). Components across all
    images have consecutive ids 1 through n.
    Components are labeled according to the first pixel of the
    component appearing in row-major order (lexicographic order by
    image_index_in_batch, row, col).
    Zero entries all have an output id of 0.
    This op is equivalent with `scipy.ndimage.measurements.label`
    on a 2D array with the default structuring element
    (which is the connectivity used here).
    Args:
      images: A 2D (H, W) or 3D (N, H, W) `Tensor` of image (integer,
        floating point and boolean types are supported).
      name: The name of the op.
    Returns:
      Components with the same shape as `images`.
      entries that evaluate to False (e.g. 0/0.0f, False) in `images` have
      value 0, and all other entries map to a component id > 0.
    Raises:
      TypeError: if `images` is not 2D or 3D.
    """
    with tf.name_scope(name or "connected_components"):
        image_or_images = tf.convert_to_tensor(images, name="images")
        # Normalize to a batched (N, H, W) layout; remember the original rank
        # so a 2D input gets its batch dimension stripped again on return.
        if len(image_or_images.get_shape()) == 2:
            images = image_or_images[None, :, :]
        elif len(image_or_images.get_shape()) == 3:
            images = image_or_images
        else:
            raise TypeError(
                "images should have rank 2 (HW) or 3 (NHW). Static shape is %s"
                % image_or_images.get_shape()
            )
        components = _image_so.ops.addons_image_connected_components(images)
        # TODO(ringwalt): Component id renaming should be done in the op,
        # to avoid constructing multiple additional large tensors.
        # Remap the raw component ids produced by the op to consecutive
        # integers 1..n (0 reserved for background), batch-wide.
        components_flat = tf.reshape(components, [-1])
        unique_ids, id_index = tf.unique(components_flat)
        # Position (if any) of the background id 0 within unique_ids.
        id_is_zero = tf.where(tf.equal(unique_ids, 0))[:, 0]
        # Map each nonzero id to consecutive values.
        nonzero_consecutive_ids = (
            tf.range(tf.shape(unique_ids)[0] - tf.shape(id_is_zero)[0]) + 1
        )

        def METHOD_NAME():
            # No need to insert a zero into the ids.
            return nonzero_consecutive_ids

        def has_zero():
            # Insert a zero in the consecutive ids
            # where zero appears in unique_ids.
            # id_is_zero has length 1.
            zero_id_ind = tf.cast(id_is_zero[0], tf.int32)
            ids_before = nonzero_consecutive_ids[:zero_id_ind]
            ids_after = nonzero_consecutive_ids[zero_id_ind:]
            return tf.concat([ids_before, [0], ids_after], axis=0)

        # Choose the remapping table depending on whether 0 occurred at all.
        new_ids = tf.cond(tf.equal(tf.shape(id_is_zero)[0], 0), METHOD_NAME, has_zero)
        components = tf.reshape(tf.gather(new_ids, id_index), tf.shape(components))
        if len(image_or_images.get_shape()) == 2:
            return components[0, :, :]
        else:
            return components
298,629 | test lj system | import numpy as np
import pytest
import unyt as u
from unyt.testing import assert_allclose_units
from gmso import Topology
from gmso.tests.base_test import BaseTest
from gmso.tests.utils import get_path
from gmso.utils.io import get_fn
class TestMol2(BaseTest):
    """Tests for loading Tripos mol2 files into gmso Topology objects."""

    def test_read_mol2(self):
        """Load several bundled mol2 files and verify sites, box, and bonds."""
        top = Topology.load(get_fn("parmed.mol2"))
        assert top.name == "parmed"
        assert top.n_sites == 8
        assert_allclose_units(
            top.box.lengths,
            ([8.2693, 7.9100, 6.6460] * u.Å).to("nm"),
            rtol=1e-5,
            atol=1e-8,
        )
        assert list(top.sites)[0].element.name == "carbon"
        assert_allclose_units(
            list(top.sites)[0].element.mass,
            np.array(1.9944733e-26) * u.kg,
            rtol=1e-5,
            atol=1e-8,
        )
        top = Topology.load(get_fn("tip3p.mol2"))
        assert top.name == "tip3p"
        assert top.n_sites == 3
        assert_allclose_units(
            top.box.lengths, 3.0130 * np.ones(3) * u.Å, rtol=1e-5, atol=1e-8
        )
        positions_check = [
            [0.061, 0.1, 0.1],
            [0.017, 0.09, 0.177],
            [0.011, 0.154, 0.04],
        ]
        for check, site in zip(positions_check, top.sites):
            assert_allclose_units(
                site.position,
                check * u.nm,
                rtol=1e-5,
                atol=1e-8,
            )
        top = Topology.load(get_fn("vmd.mol2"))
        assert top.name == "vmd"
        assert top.n_sites == 6
        assert len(top.bonds) == 5
        assert top.bonds[0].connection_members[0] == top.sites[0]
        assert top.box == None
        # Missing charges should warn but still load, leaving charge unset.
        with pytest.warns(
            UserWarning,
            match=r"No charge was detected for site C with index 1",
        ):
            top = Topology.load(get_fn("ethane.mol2"), verbose=True)
        assert list(top.sites)[0].charge is None

    def test_residue(self):
        """Residue names/numbers are parsed for both AA and UA molecules."""
        top = Topology.load(get_fn("ethanol_aa.mol2"))
        assert np.all([site.residue[0] == "ETO" for site in top.sites])
        assert np.all([site.residue[1] == 1 for site in top.sites])
        top = Topology.load(get_fn("benzene_ua.mol2"), site_type="lj")
        assert np.all(
            [
                site.residue[0] == "BEN1"
                for site in top.iter_sites_by_residue("BEN1")
            ]
        )
        assert np.all(
            [site.residue[1] == 1 for site in top.iter_sites_by_residue("BEN1")]
        )
        assert np.all(
            [
                site.residue[0] == "BEN2"
                for site in top.iter_sites_by_residue("BEN2")
            ]
        )
        assert np.all(
            [site.residue[1] == 2 for site in top.iter_sites_by_residue("BEN2")]
        )

    def METHOD_NAME(self):
        """Loading with site_type="lj" leaves element detection disabled."""
        top = Topology.load(get_fn("methane.mol2"), site_type="lj")
        assert np.all([site.element == None for site in top.sites])

    def test_no_charge_lj(self):
        """LJ-site loading still warns about missing charges."""
        with pytest.warns(
            UserWarning,
            match=r"No charge was detected for site .* with index \d+$",
        ):
            top = Topology.load(
                get_path("methane_missing_charge.mol2"),
                site_type="lj",
                verbose=True,
            )

    def test_wrong_path(self):
        """Nonexistent paths raise; files lacking mol2 sections load empty."""
        with pytest.raises(
            OSError, match=r"Provided path to file that does not exist"
        ):
            Topology.load("not_a_file.mol2")
        top = Topology.load(get_fn("ethanegro.mol2"))
        assert len(top.sites) == 0
        assert len(top.bonds) == 0

    def test_broken_files(self):
        """Unknown record-type indicators and duplicate boxes only warn."""
        with pytest.warns(
            UserWarning,
            match=r"The record type indicator @<TRIPOS>MOLECULE_extra_text is not supported. Skipping current section and moving to the next RTI header.",
        ):
            Topology.load(get_fn("broken.mol2"))
        with pytest.warns(
            UserWarning,
            match=r"This mol2 file has two boxes to be read in, only reading in one with dimensions Box\(a=0.72",
        ):
            Topology.load(get_fn("broken.mol2"), verbose=True)

    def test_benzene_mol2_elements(self):
        """Benzene atoms resolve to carbon or hydrogen elements."""
        top = Topology.load(get_fn("benzene.mol2"))
        for atom in top.sites:
            assert atom.element.name in {"hydrogen", "carbon"}

    def test_neopentane_mol2_elements(self):
        """Undetectable elements produce a warning rather than an error."""
        with pytest.warns(
            UserWarning,
            match=r"No element detected for site .+ with index \d+, "
            r"consider manually adding the element to the topology$",
        ):
            top = Topology.load(get_fn("neopentane.mol2"), verbose=True)

    def test_mol2_residues(self):
        """All sites of parmed.mol2 belong to residue RES #1."""
        top = Topology.load(get_fn("parmed.mol2"))
        assert np.all(
            np.array([site.residue.name for site in top.sites]) == "RES"
        )
        assert np.all(
            np.array([site.residue.number for site in top.sites]) == 1
        )

    def test_mol2_molecules(self):
        """All sites of methane.mol2 belong to molecule MET #1."""
        top = Topology.load(get_fn("methane.mol2"))
        assert np.all(
            np.array([site.molecule.name for site in top.sites]) == "MET"
        )
        assert np.all(
            np.array([site.molecule.number for site in top.sites]) == 1
        )

    def test_mol2_group(self):
        # Is there a place to read from mol2 file?
        # NOTE(review): the group attribute is set manually here because mol2
        # has no standard group record — this only checks assignment round-trip.
        top = Topology.load(get_fn("ethane.mol2"))
        for site in top.sites:
            site.group = "ethane"
        assert np.all(np.array([site.group for site in top.sites]) == "ethane")
298,630 | turret sorter | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import math
from collections import OrderedDict
import wx
from eos.const import FittingHardpoint
from eos.saveddata.module import Module
from eos.utils.stats import DmgTypes
from service.market import Market
_t = wx.GetTranslation
class Ammo:
    """Singleton helper that collects and groups the charges a module can load.

    Grouping depends on the module kind: damage turrets are bucketed by
    range/name family, missile launchers by damage type, mining lasers by
    ore specialization, and everything else is returned as one flat group.
    """

    # Lazily created singleton instance (see getInstance()).
    instance = None

    @classmethod
    def getInstance(cls):
        """Return the shared Ammo instance, creating it on first use."""
        if cls.instance is None:
            cls.instance = Ammo()
        return cls.instance

    @staticmethod
    def getModuleFlatAmmo(mod):
        """Return the set of publicly available charges *mod* can load."""
        sMkt = Market.getInstance()
        if mod is None or mod.isEmpty:
            return set()
        chargeSet = set()
        # Do not try to grab it for t3d modes which can also be passed as part of selection
        if isinstance(mod, Module):
            for charge in mod.getValidCharges():
                # Only show charges that are published on the market.
                if sMkt.getPublicityByItem(charge):
                    chargeSet.add(charge)
        return chargeSet

    @classmethod
    def getModuleStructuredAmmo(cls, mod, ammo=None):
        """Return (category, grouping) of charges for *mod*.

        category is one of 'ddTurret', 'ddMissile', 'miner', or 'general';
        grouping is an ordered mapping of group label -> list of charges
        (or a plain dict for the 'miner'/'general' cases).  Pass *ammo* to
        group a precomputed charge set instead of querying the module.
        """
        chargesFlat = cls.getModuleFlatAmmo(mod) if ammo is None else ammo
        # Make sure we do not consider mining turrets as combat turrets
        if mod.hardpoint == FittingHardpoint.TURRET and not mod.getModifiedItemAttr('miningAmount'):
            def METHOD_NAME(charge):
                # Sort key: longest effective range first, then by the last two
                # words of the type name, then by total damage and name.
                damage = 0
                range_ = (mod.item.getAttribute('maxRange')) * \
                    (charge.getAttribute('weaponRangeMultiplier') or 1)
                # NOTE(review): 'fallofMultiplier' looks like a typo for
                # 'falloffMultiplier' — verify against the game data attribute
                # name before changing it.
                falloff = (mod.item.getAttribute('falloff') or 0) * \
                    (charge.getAttribute('fallofMultiplier') or 1)
                for type_ in DmgTypes.names():
                    d = charge.getAttribute('%sDamage' % type_)
                    if d > 0:
                        damage += d
                # Take optimal and falloff as range factor
                rangeFactor = range_ + falloff
                return -rangeFactor, charge.typeName.rsplit()[-2:], damage, charge.name
            all = OrderedDict()
            sub = []
            prevNameBase = None
            prevRange = None
            # Walk sorted charges; start a new group whenever the name base or
            # range multiplier changes, keyed by the first charge in the group.
            for charge in sorted(chargesFlat, key=METHOD_NAME):
                if 'civilian' in charge.typeName.lower():
                    continue
                currNameBase = ' '.join(charge.typeName.rsplit()[-2:])
                currRange = charge.getAttribute('weaponRangeMultiplier')
                if sub and (currRange != prevRange or currNameBase != prevNameBase):
                    all[sub[0].name] = sub
                    sub = []
                sub.append(charge)
                prevNameBase = currNameBase
                prevRange = currRange
            else:
                # Flush the trailing group after the loop completes.
                if sub:
                    all[sub[0].name] = sub
            return 'ddTurret', all
        elif mod.hardpoint == FittingHardpoint.MISSILE and mod.item.name != 'Festival Launcher':
            def getChargeDamageInfo(charge):
                # Set up data storage for missile damage stuff
                damageMap = {}
                totalDamage = 0
                # Fill them with the data about charge
                for damageType in DmgTypes.names():
                    currentDamage = charge.getAttribute('{}Damage'.format(damageType)) or 0
                    damageMap[damageType] = currentDamage
                    totalDamage += currentDamage
                # Detect type of ammo
                chargeDamageType = None
                for damageType in damageMap:
                    # If all damage belongs to certain type purely, set appropriate
                    # ammoType
                    if damageMap[damageType] == totalDamage:
                        chargeDamageType = damageType
                        break
                # Else consider ammo as mixed damage
                if chargeDamageType is None:
                    chargeDamageType = 'mixed'
                return chargeDamageType, totalDamage
            def missileSorter(charge):
                # Get charge damage type and total damage
                chargeDamageType, totalDamage = getChargeDamageInfo(charge)
                # Find its position in sort list
                try:
                    position = DmgTypes.names().index(chargeDamageType)
                # Put charges which have non-standard damage type after charges with
                # standard damage type
                except ValueError:
                    position = math.inf
                return position, totalDamage, charge.name
            all = OrderedDict()
            sub = []
            prevType = None
            # Group consecutive charges sharing the same damage type.
            for charge in sorted(chargesFlat, key=missileSorter):
                currType = getChargeDamageInfo(charge)[0]
                if sub and currType != prevType:
                    all[prevType] = sub
                    sub = []
                sub.append(charge)
                prevType = currType
            else:
                # Flush the trailing group after the loop completes.
                if sub:
                    all[prevType] = sub
            return 'ddMissile', all
        elif mod.item.group.name == 'Frequency Mining Laser':
            def crystalSorter(charge):
                # Sort key: crystal grade letter (A/B/C), then tech level, then name.
                if charge.name.endswith(' II'):
                    techLvl = 2
                elif charge.name.endswith(' I'):
                    techLvl = 1
                else:
                    techLvl = 0
                if ' A ' in charge.name:
                    type_ = 'A'
                elif ' B ' in charge.name:
                    type_ = 'B'
                elif ' C ' in charge.name:
                    type_ = 'C'
                else:
                    type_ = '0'
                return type_, techLvl, charge.name
            # Maps specializationAsteroidTypeList attribute values to
            # ore-category labels used as group keys.
            typeMap = {
                253: 'a1',
                254: 'a2',
                255: 'a3',
                256: 'a4',
                257: 'a5',
                258: 'a6',
                259: 'r4',
                260: 'r8',
                261: 'r16',
                262: 'r32',
                263: 'r64'}
            prelim = {}
            for charge in chargesFlat:
                oreTypeList = charge.getAttribute('specializationAsteroidTypeList')
                category = typeMap.get(oreTypeList, _t('Misc'))
                prelim.setdefault(category, set()).add(charge)
            final = OrderedDict()
            for category, charges in prelim.items():
                final[category] = sorted(charges, key=crystalSorter)
            return 'miner', final
        else:
            def nameSorter(charge):
                # Natural sort: numeric name fragments compare as integers.
                parts = charge.name.split(" ")
                return [int(p) if p.isdigit() else p for p in parts]
            return 'general', {'general': sorted(chargesFlat, key=nameSorter)}
298,631 | test split complexarray | #! /usr/bin/env python3
"""test split"""
# --- import --------------------------------------------------------------------------------------
import numpy as np
import WrightTools as wt
from WrightTools import datasets
import pathlib
# --- test ----------------------------------------------------------------------------------------
here = pathlib.Path(__file__).parent
def test_split():
    """Splitting along axis 0 preserves axis expressions and units."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    original_exprs = data.axis_expressions
    pieces = data.split(0, [19700], units="wn")
    # The parent data object must be left untouched by the split.
    assert original_exprs == data.axis_expressions
    assert original_exprs == pieces[0].axis_expressions
    assert len(pieces) == 2
    print(pieces[0].shape)
    assert pieces[0].shape == (14, 11, 11)
    assert pieces[1].shape == (21, 11, 11)
    assert data.units == pieces[0].units
    data.close()
    pieces.close()
def METHOD_NAME():
    """A complex-valued channel keeps its dtype through a split."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    data.create_channel(name="complex", values=np.complex128(data.channels[0][:]), dtype=np.complex128)
    pieces = data.split(0, [19700], units="wn")
    assert len(pieces) == 2
    assert pieces[0].shape == (14, 11, 11)
    assert pieces[1].shape == (21, 11, 11)
    # dtype must survive the split unchanged.
    assert pieces[0].complex[:].dtype == np.complex128
    assert data.units == pieces[0].units
    data.close()
    pieces.close()
def test_split_edge():
    """A split point at the data edge yields one full piece and one empty one."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    pieces = data.split(0, [20800], units="wn")
    assert len(pieces) == 2
    assert pieces[0].shape == (35, 11, 11)
    # Nothing lies beyond the split point, so the second piece is empty.
    assert pieces[1].shape == ()
    data.close()
    pieces.close()
def test_split_multiple():
    """Two split points produce three pieces covering the whole axis."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    pieces = data.split(0, [20605, 19705], units="wn")
    assert len(pieces) == 3
    assert pieces[2].shape == (2, 11, 11)
    assert pieces[1].shape == (18, 11, 11)
    assert pieces[0].shape == (15, 11, 11)
    data.close()
    pieces.close()
def test_split_close():
    """Split points that lie close together may leave an empty middle piece."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    pieces = data.split(0, [19705, 19702], units="wn")
    assert len(pieces) == 3
    assert pieces[0].shape == (15, 11, 11)
    # No points fall between the two nearby split values.
    assert pieces[1].shape == ()
    assert pieces[2].shape == (20, 11, 11)
    data.close()
    pieces.close()
def test_split_units():
    """The split position may be given in units other than the axis units."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    pieces = data.split(0, [507], units="nm")
    assert len(pieces) == 2
    assert pieces[0].shape == (20, 11, 11)
    assert pieces[1].shape == (15, 11, 11)
    data.close()
    pieces.close()
def test_split_axis_name():
    """The split axis can be addressed by name; singular dims are dropped."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    pieces = data.split("w2", [1555])
    pieces.print_tree()
    assert len(pieces) == 2
    assert pieces[1].shape == (35, 10, 11)
    assert pieces[0].shape == (35, 11)
    # The collapsed w2 axis no longer appears in the piece's expressions.
    assert pieces[0].axis_expressions == ("wm", "w1")
    data.close()
    pieces.close()
def test_split_constant():
    """Splitting by axis index collapses the split dimension when singular."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    pieces = data.split(1, [1555])
    pieces.print_tree()
    assert len(pieces) == 2
    assert pieces[0].shape == (35, 11)
    data.close()
    pieces.close()
def test_split_parent():
    """The split collection can be created inside an existing parent collection."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    parent = wt.Collection()
    pieces = data.split(1, [1500], parent=parent)
    # The result lives inside the parent collection's file.
    assert "split" in parent
    assert pieces.filepath == parent.filepath
    assert len(pieces) == 2
    data.close()
    parent.close()
def test_split_expression():
    """Splitting accepts a full axis expression rather than a bare name."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
    pieces = data.split("w1+w2", 3150, units="wn")
    assert len(pieces) == 2
    assert pieces[0].shape == (35, 10, 10)
    assert pieces[1].shape == (35, 11, 11)
    data.close()
    pieces.close()
def test_split_hole():
    """Splitting on a variable expression can carve a hole out of the middle."""
    data = wt.Data()
    data.create_variable("x", np.linspace(-5, 5, 100)[:, None])
    data.create_variable("y", np.linspace(-5, 5, 100)[None, :])
    data.create_variable("z", np.exp(-data.x[:] ** 2) * np.exp(-data.y[:] ** 2))
    pieces = data.split("z", 0.5)
    assert len(pieces) == 2
    assert pieces[0].shape == (100, 100)
    # The high-z region is a small square around the gaussian's peak.
    assert pieces[1].shape == (16, 16)
    data.close()
    pieces.close()
def test_split_constants():
    """Pieces that are constant along the split expression record constants."""
    data = wt.Data()
    data.create_variable("x", np.linspace(0, 10, 11)[:, None])
    data.create_variable("y", np.linspace(0, 10, 11)[None, :])
    data.create_channel("z", data.x[:] * data.y[:])
    data.transform("x", "y")
    pieces = data.split("x-y", [-0.1, 0.1, 9.9])
    assert len(pieces) == 4
    assert pieces[0].constant_expressions == ()
    # The diagonal piece is constant along x-y, with value 0 and no spread.
    assert pieces[1].constant_expressions == ("x-y",)
    assert pieces[1].constants[0].value == 0
    assert pieces[1].constants[0].std == 0
    assert pieces[1].constants[0].shape == (11, 11)
    assert pieces[2].constant_expressions == ()
    assert pieces[3].constant_expressions == ("x", "y", "x-y")
    data.close()
    pieces.close()
def test_autotune():
    """Splitting works on local autotune data under different transforms."""
    path = here / "test_data" / "autotune.data"
    data = wt.data.from_PyCMDS(str(path))
    data.transform("w2", "w2_BBO")
    data.split("w2_BBO", [42]).close()
    data.transform("w2_BBO", "w2")
    data.split("w2_BBO", [42, 41.5]).close()
    data.close()
# Allow running this module directly as a script.
# Fix: removed a stray trailing `|` token after the last call, which was a
# syntax error.
if __name__ == "__main__":
    test_split()
    test_split_edge()
    test_split_multiple()
    test_split_close()
    test_split_units()
    test_split_axis_name()
    test_split_constant()
    test_split_parent()
    test_split_expression()
    test_split_hole()
    test_split_constants()
    METHOD_NAME()
    test_autotune()
import sys
import unittest
import io
import atexit
from test import support
### helpers
def h1():
    # Exit handler used by the ordering tests; writes its name to stdout.
    print("h1")
def h2():
    # Exit handler used by the ordering tests; writes its name to stdout.
    print("h2")
def h3():
    # Exit handler used by the ordering tests; writes its name to stdout.
    print("h3")
def METHOD_NAME(*args, **kwargs):
    # Exit handler that echoes whatever arguments it was registered with.
    print("h4", args, kwargs)
def raise1():
    # Exit handler that fails with TypeError, for error-propagation tests.
    raise TypeError
def raise2():
    # Exit handler that fails with SystemError, for error-propagation tests.
    raise SystemError
class GeneralTest(unittest.TestCase):
    """Tests for the documented atexit API plus the private test hooks
    (_clear, _run_exitfuncs, _ncallbacks)."""

    def setUp(self):
        # Redirect both streams so handler output/tracebacks can be asserted on.
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr
        self.stream = io.StringIO()
        sys.stdout = sys.stderr = self.stream
        atexit._clear()

    def tearDown(self):
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr
        atexit._clear()

    def test_args(self):
        # be sure args are handled properly
        atexit.register(h1)
        atexit.register(METHOD_NAME)
        atexit.register(METHOD_NAME, 4, kw="abc")
        atexit._run_exitfuncs()
        # Handlers run LIFO, so the h4 output precedes h1.
        self.assertEqual(self.stream.getvalue(),
                         "h4 (4,) {'kw': 'abc'}\nh4 () {}\nh1\n")

    def test_badargs(self):
        # Registering with arguments the callable cannot accept raises on run.
        atexit.register(lambda: 1, 0, 0, (x for x in (1,2)), 0, 0)
        self.assertRaises(TypeError, atexit._run_exitfuncs)

    def test_order(self):
        # be sure handlers are executed in reverse order
        atexit.register(h1)
        atexit.register(h2)
        atexit.register(h3)
        atexit._run_exitfuncs()
        self.assertEqual(self.stream.getvalue(), "h3\nh2\nh1\n")

    def test_raise(self):
        # be sure raises are handled properly
        atexit.register(raise1)
        atexit.register(raise2)
        self.assertRaises(TypeError, atexit._run_exitfuncs)

    def test_raise_unnormalized(self):
        # Issue #10756: Make sure that an unnormalized exception is
        # handled properly
        atexit.register(lambda: 1 / 0)
        self.assertRaises(ZeroDivisionError, atexit._run_exitfuncs)
        self.assertIn("ZeroDivisionError", self.stream.getvalue())

    def test_print_tracebacks(self):
        # Issue #18776: the tracebacks should be printed when errors occur.
        # NOTE: the "# one"/"# two"/"# three" comments below are asserted
        # against (they appear in the printed traceback source lines).
        def f():
            1/0 # one
        def g():
            1/0 # two
        def h():
            1/0 # three
        atexit.register(f)
        atexit.register(g)
        atexit.register(h)
        self.assertRaises(ZeroDivisionError, atexit._run_exitfuncs)
        stderr = self.stream.getvalue()
        self.assertEqual(stderr.count("ZeroDivisionError"), 3)
        self.assertIn("# one", stderr)
        self.assertIn("# two", stderr)
        self.assertIn("# three", stderr)

    def test_stress(self):
        # Many registrations of the same callable must all be invoked.
        a = [0]
        def inc():
            a[0] += 1
        for i in range(128):
            atexit.register(inc)
        atexit._run_exitfuncs()
        self.assertEqual(a[0], 128)

    def test_clear(self):
        a = [0]
        def inc():
            a[0] += 1
        atexit.register(inc)
        atexit._clear()
        # _clear() removed the handler, so the counter stays untouched.
        atexit._run_exitfuncs()
        self.assertEqual(a[0], 0)

    def test_unregister(self):
        a = [0]
        def inc():
            a[0] += 1
        def dec():
            a[0] -= 1
        for i in range(4):
            atexit.register(inc)
        atexit.register(dec)
        # unregister removes every registration of the callable at once.
        atexit.unregister(inc)
        atexit._run_exitfuncs()
        self.assertEqual(a[0], -1)

    def test_bound_methods(self):
        l = []
        atexit.register(l.append, 5)
        atexit._run_exitfuncs()
        self.assertEqual(l, [5])
        # unregister must match distinct-but-equal bound method objects too.
        atexit.unregister(l.append)
        atexit._run_exitfuncs()
        self.assertEqual(l, [5])
class SubinterpreterTest(unittest.TestCase):
    """Checks that atexit callback state is isolated per subinterpreter."""

    def test_callbacks_leak(self):
        # This test shows a leak in refleak mode if atexit doesn't
        # take care to free callbacks in its per-subinterpreter module
        # state.
        n = atexit._ncallbacks()
        code = r"""if 1:
            import atexit
            def f():
                pass
            atexit.register(f)
            del atexit
            """
        ret = support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The main interpreter's callback count must be unchanged.
        self.assertEqual(atexit._ncallbacks(), n)

    def test_callbacks_leak_refcycle(self):
        # Similar to the above, but with a refcycle through the atexit
        # module.
        n = atexit._ncallbacks()
        code = r"""if 1:
            import atexit
            def f():
                pass
            atexit.register(f)
            atexit.__atexit = atexit
            """
        ret = support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        self.assertEqual(atexit._ncallbacks(), n)
def test_main():
    """Entry point used by the regression-test runner for this module."""
    support.run_unittest(__name__)


# Fix: removed a stray trailing `|` token after test_main(), which was a
# syntax error.
if __name__ == "__main__":
    test_main()
import re
import sys
from datetime import datetime
from pathlib import Path
from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
logger = logger.bind(name='filesystem')
class Filesystem:
    """
    Uses local path content as an input. Can use recursion if configured.

    Recursion is False by default. Can be configured to true or get integer that will
    specify max depth in relation to base folder.
    All files/dir/symlinks are retrieved by default. Can be changed by using the
    'retrieve' property.

    Example 1:: Single path

        filesystem: /storage/movies/

    Example 2:: List of paths

        filesystem:
          - /storage/movies/
          - /storage/tv/

    Example 3:: Object with list of paths

        filesystem:
          path:
            - /storage/movies/
            - /storage/tv/
          mask: '*.mkv'

    Example 4::

        filesystem:
          path:
            - /storage/movies/
            - /storage/tv/
          recursive: 4  # 4 levels deep from each base folder
          retrieve: files  # Only files will be retrieved

    Example 5::

        filesystem:
          path:
            - /storage/movies/
            - /storage/tv/
          recursive: yes  # No limit to depth, all sub dirs will be accessed
          retrieve:  # Only files and dirs will be retrieved
            - files
            - dirs
    """

    # Object types that may be retrieved; also the default for 'retrieve'.
    retrieval_options = ['files', 'dirs', 'symlinks']
    paths = one_or_more({'type': 'string', 'format': 'path'}, unique_items=True)

    schema = {
        'oneOf': [
            paths,
            {
                'type': 'object',
                'properties': {
                    'path': paths,
                    'mask': {'type': 'string'},
                    'regexp': {'type': 'string', 'format': 'regex'},
                    'recursive': {
                        'oneOf': [{'type': 'integer', 'minimum': 2}, {'type': 'boolean'}]
                    },
                    'retrieve': one_or_more(
                        {'type': 'string', 'enum': retrieval_options}, unique_items=True
                    ),
                },
                'required': ['path'],
                'additionalProperties': False,
            },
        ]
    }

    def prepare_config(self, config):
        """Normalize *config* into a dict with 'path' as a list and defaults set.

        Fix: removed a pointless `config = config` self-assignment.
        """
        from fnmatch import translate

        # Converts config to a dict with a list of paths
        if not isinstance(config, dict):
            config = {'path': config}
        if not isinstance(config['path'], list):
            config['path'] = [config['path']]
        config.setdefault('recursive', False)
        # If mask was specified, turn it in to a regexp
        if config.get('mask'):
            config['regexp'] = translate(config['mask'])
        # If no mask or regexp specified, accept all files
        config.setdefault('regexp', '.')
        # Sets the default retrieval option to all object types
        config.setdefault('retrieve', self.retrieval_options)
        return config

    def create_entry(self, filepath: Path, test_mode):
        """
        Creates a single entry using a filepath and a type (file/dir).

        Returns the entry, or None when the constructed entry is invalid.
        """
        filepath = filepath.absolute()
        entry = Entry()
        entry['location'] = str(filepath)
        entry['url'] = Path(filepath).absolute().as_uri()
        entry['filename'] = filepath.name
        # Files drop their extension for the title; directories keep the name.
        if filepath.is_file():
            entry['title'] = filepath.stem
        else:
            entry['title'] = filepath.name
        file_stat = filepath.stat()
        try:
            entry['timestamp'] = datetime.fromtimestamp(file_stat.st_mtime)
        except Exception as e:
            logger.warning('Error setting timestamp for {}: {}', filepath, e)
            entry['timestamp'] = None
        entry['accessed'] = datetime.fromtimestamp(file_stat.st_atime)
        entry['modified'] = datetime.fromtimestamp(file_stat.st_mtime)
        entry['created'] = datetime.fromtimestamp(file_stat.st_ctime)
        if entry.isvalid():
            if test_mode:
                logger.info("Test mode. Entry includes:")
                logger.info(' Title: {}', entry['title'])
                logger.info(' URL: {}', entry['url'])
                logger.info(' Filename: {}', entry['filename'])
                logger.info(' Location: {}', entry['location'])
                logger.info(' Timestamp: {}', entry['timestamp'])
            return entry
        else:
            logger.error('Non valid entry created: {} ', entry)
            return

    def get_max_depth(self, recursion, base_depth):
        """Translate the 'recursive' option into an absolute max path depth."""
        if recursion is False:
            return base_depth + 1
        elif recursion is True:
            return float('inf')
        else:
            return base_depth + recursion

    @staticmethod
    def METHOD_NAME(folder: Path, recursion: bool):
        """Return an iterator over *folder* contents, recursive when requested."""
        return folder.rglob('*') if recursion else folder.iterdir()

    def get_entries_from_path(
        self, path_list, match, recursion, test_mode, get_files, get_dirs, get_symlinks
    ):
        """Walk each base folder and build entries for qualifying objects."""
        entries = []

        for folder in path_list:
            logger.verbose('Scanning folder {}. Recursion is set to {}.', folder, recursion)
            folder = Path(folder).expanduser()
            if not folder.exists():
                logger.error('{} does not exist (anymore.)', folder)
                continue
            logger.debug('Scanning {}', folder)
            base_depth = len(folder.parts)
            max_depth = self.get_max_depth(recursion, base_depth)
            folder_objects = self.METHOD_NAME(folder, recursion)
            for path_object in folder_objects:
                logger.debug('Checking if {} qualifies to be added as an entry.', path_object)
                try:
                    path_object.exists()
                except UnicodeError:
                    logger.error(
                        'File {} not decodable with filesystem encoding: {}',
                        path_object,
                        sys.getfilesystemencoding(),
                    )
                    continue
                entry = None
                object_depth = len(path_object.parts)
                if object_depth <= max_depth:
                    if match(str(path_object)):
                        # Only create entries for the requested object types;
                        # symlinked files count as symlinks, not files.
                        if (
                            (path_object.is_dir() and get_dirs)
                            or (path_object.is_symlink() and get_symlinks)
                            or (
                                path_object.is_file()
                                and not path_object.is_symlink()
                                and get_files
                            )
                        ):
                            entry = self.create_entry(path_object, test_mode)
                        else:
                            logger.debug(
                                "Path object's {} type doesn't match requested object types.",
                                path_object,
                            )
                        if entry and entry not in entries:
                            entries.append(entry)
        return entries

    def on_task_input(self, task, config):
        """FlexGet input hook: produce entries for the configured paths."""
        config = self.prepare_config(config)

        path_list = config['path']
        test_mode = task.options.test
        match = re.compile(config['regexp'], re.IGNORECASE).match
        recursive = config['recursive']
        get_files = 'files' in config['retrieve']
        get_dirs = 'dirs' in config['retrieve']
        get_symlinks = 'symlinks' in config['retrieve']

        logger.verbose('Starting to scan folders.')
        return self.get_entries_from_path(
            path_list, match, recursive, test_mode, get_files, get_dirs, get_symlinks
        )
@event('plugin.register')
def register_plugin():
    """Register the filesystem input plugin with FlexGet.

    Fix: removed a stray trailing `|` token, which was a syntax error.
    """
    plugin.register(Filesystem, 'filesystem', api_ver=2)
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1, operations_v1
from google.api_core import retry as retries
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.resourcemanager_v3 import gapic_version as package_version
from google.cloud.resourcemanager_v3.types import tag_holds
# Default client metadata (user-agent versioning) attached to API requests.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)
class TagHoldsTransport(abc.ABC):
    """Abstract transport class for TagHolds."""

    # OAuth scopes requested when no explicit scopes are supplied.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud-platform.read-only",
    )

    DEFAULT_HOST: str = "cloudresourcemanager.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): The intended audience for the
                credentials; defaults to ``host`` when unset.
        """
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.create_tag_hold: gapic_v1.method.wrap_method(
                self.create_tag_hold,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_tag_hold: gapic_v1.method.wrap_method(
                self.delete_tag_hold,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_tag_holds: gapic_v1.method.wrap_method(
                self.list_tag_holds,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # The properties below form the abstract RPC surface; concrete transports
    # (e.g. gRPC or REST subclasses) override them with real callables.
    @property
    def create_tag_hold(
        self,
    ) -> Callable[
        [tag_holds.CreateTagHoldRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def delete_tag_hold(
        self,
    ) -> Callable[
        [tag_holds.DeleteTagHoldRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def list_tag_holds(
        self,
    ) -> Callable[
        [tag_holds.ListTagHoldsRequest],
        Union[
            tag_holds.ListTagHoldsResponse, Awaitable[tag_holds.ListTagHoldsResponse]
        ],
    ]:
        raise NotImplementedError()

    @property
    def METHOD_NAME(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()
__all__ = ("TagHoldsTransport",)
# -*- coding: utf-8 -*-
#
# This file is part of SKALE Admin
#
# Copyright (C) 2019 SKALE Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from flask import Blueprint, g, request
from core.schains.config.file_manager import ConfigFileManager
from core.schains.config.helper import (
get_base_port_from_config,
get_node_ips_from_config,
get_own_ip_from_config
)
from core.schains.firewall.utils import (
get_default_rule_controller,
get_sync_agent_ranges
)
from core.schains.skaled_status import init_skaled_status
from core.schains.ima import get_ima_version
from core.schains.info import get_schain_info_by_name, get_skaled_version
from core.schains.cleaner import get_schains_on_node
from web.models.schain import get_schains_statuses, toggle_schain_repair_mode
from web.helper import (
construct_ok_response,
construct_err_response,
construct_key_error_response,
get_api_url,
g_skale
)
logger = logging.getLogger(__name__)

# Name under which this blueprint's routes are registered.
BLUEPRINT_NAME = 'schains'
schains_bp = Blueprint(BLUEPRINT_NAME, __name__)
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'statuses'), methods=['GET'])
def schain_statuses():
    """Return skaled status info for every sChain present on this node."""
    logger.debug(request)
    node_id = g.config.id
    if node_id is None:
        return construct_err_response(msg='No node installed')
    schains_on_node = get_schains_on_node(g.docker_utils)
    statuses = {}
    for schain_name in schains_on_node:
        skaled_status = init_skaled_status(schain_name)
        statuses[schain_name] = skaled_status.all
    return construct_ok_response(statuses)
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'config'), methods=['GET'])
def schain_config():
    """Return the 'skaleConfig' section of a named sChain's skaled config."""
    logger.debug(request)
    key = 'schain_name'
    schain_name = request.args.get(key)
    if not schain_name:
        return construct_key_error_response([key])
    config = ConfigFileManager(schain_name).skaled_config
    if config is None:
        return construct_err_response(
            msg=f'sChain config not found: {schain_name}'
        )
    skale_config = config['skaleConfig']
    return construct_ok_response(skale_config)
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'list'), methods=['GET'])
@g_skale
def schains_list():
    """Return the sChains assigned to the current node.

    Fix: removed an accidentally duplicated ``logger.debug(request)`` call.
    """
    logger.debug(request)
    node_id = g.config.id
    if node_id is None:
        return construct_err_response(msg='No node installed')
    # Drop records with no name set.
    schains_list = list(filter(
        lambda s: s.get('name'),
        g.skale.schains.get_schains_for_node(node_id)
    ))
    return construct_ok_response(schains_list)
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'dkg-statuses'), methods=['GET'])
def dkg_statuses():
    """Return DKG status records; '?all=True' includes every record."""
    logger.debug(request)
    include_all = request.args.get('all') == 'True'
    statuses = get_schains_statuses(include_all)
    return construct_ok_response(statuses)
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'firewall-rules'), methods=['GET'])
@g_skale
def firewall_rules():
    """Return the actual firewall endpoint rules for a named sChain."""
    logger.debug(request)
    schain_name = request.args.get('schain_name')
    sync_agent_ranges = get_sync_agent_ranges(g.skale)
    cfm = ConfigFileManager(schain_name)
    if not cfm.skaled_config_exists:
        return construct_err_response(
            msg=f'No schain with name {schain_name}'
        )
    conf = cfm.skaled_config
    # The rule controller needs the chain's ports and peer IPs from its config.
    base_port = get_base_port_from_config(conf)
    node_ips = get_node_ips_from_config(conf)
    own_ip = get_own_ip_from_config(conf)
    rc = get_default_rule_controller(
        schain_name,
        base_port,
        own_ip,
        node_ips,
        sync_agent_ranges
    )
    endpoints = [e._asdict() for e in rc.actual_rules()]
    return construct_ok_response({'endpoints': endpoints})
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'repair'), methods=['POST'])
def METHOD_NAME():
    """Toggle repair mode for a named sChain (optionally from a snapshot)."""
    logger.debug(request)
    schain_name = request.json.get('schain_name')
    snapshot_from = request.json.get('snapshot_from', '')
    result = toggle_schain_repair_mode(
        schain_name, snapshot_from=snapshot_from)
    if result:
        return construct_ok_response()
    else:
        return construct_err_response(
            msg=f'No schain with name {schain_name}'
        )
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'get'), methods=['GET'])
@g_skale
def get_schain():
    """Return full info for a single sChain looked up by name."""
    logger.debug(request)
    schain_name = request.args.get('schain_name')
    info = get_schain_info_by_name(g.skale, schain_name)
    if not info:
        return construct_err_response(
            msg=f'No schain with name {schain_name}'
        )
    return construct_ok_response(info.to_dict())
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'container-versions'), methods=['GET'])
def schain_containers_versions():
    """Report versions of the skaled and IMA containers.

    Fix: removed a stray trailing `|` token, which was a syntax error.
    """
    logger.debug(request)
    version_data = {
        'skaled_version': get_skaled_version(),
        'ima_version': get_ima_version()
    }
    return construct_ok_response(version_data)
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import contextlib
import hashlib
import importlib
import os
import shutil
import tempfile
from hashlib import sha1
from site import makepath # type: ignore[attr-defined]
from pex import hashing
from pex.common import filter_pyc_dirs, filter_pyc_files, safe_mkdir, safe_mkdtemp
from pex.compatibility import ( # type: ignore[attr-defined] # `exec_function` is defined dynamically
PY2,
exec_function,
)
from pex.orderedset import OrderedSet
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import IO, Any, Callable, Iterator, Optional
from pex.hashing import Hasher
class DistributionHelper(object):
    # TODO(#584: This appears unused, but clients might still use it. We cannot remove until we
    # have a deprecation policy.
    @classmethod
    def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):
        # type: (str, str, Optional[str]) -> str
        """Create a copy of static resource files as we can't serve them from within the pex file.

        :param static_module_name: Module name containing module to cache in a tempdir
        :param static_path: Module name, for example 'serverset'
        :param dir_location: create a new temporary directory inside, or None to have one created
        :returns temp_dir: Temporary directory with the zipped assets inside
        """
        if dir_location is None:
            temp_dir = safe_mkdtemp()
        else:
            temp_dir = dir_location

        module = importlib.import_module(static_module_name)
        # N.B.: This handles namespace packages new and old.
        paths = OrderedSet(os.path.realpath(d) for d in getattr(module, "__path__", []))
        if module.__file__:
            # And this handles old-style __init__.py packages.
            paths.add(os.path.realpath(module.__file__))

        safe_mkdir(temp_dir)
        for path in paths:
            resource_dir = os.path.realpath(os.path.join(path, static_path))
            if os.path.isdir(resource_dir):
                for root, dirs, files in os.walk(resource_dir):
                    for d in dirs:
                        # Recreate the source directory tree under temp_dir.
                        safe_mkdir(
                            os.path.join(
                                temp_dir, os.path.relpath(os.path.join(root, d), resource_dir)
                            )
                        )
                    for f in files:
                        src = os.path.join(root, f)
                        shutil.copy(src, os.path.join(temp_dir, os.path.relpath(src, resource_dir)))
        return temp_dir
class CacheHelper(object):
    """Reproducible content-hash helpers for files, directories and zips."""

    @classmethod
    def METHOD_NAME(cls, path, digest=None, hasher=sha1):
        # type: (str, Optional[Hasher], Callable[[], Hasher]) -> str
        """Return the digest of a single file in a memory-efficient manner."""
        if digest is None:
            digest = hasher()
        hashing.file_hash(path, digest)
        return digest.hexdigest()

    @classmethod
    def pex_code_hash(cls, directory):
        # type: (str) -> str
        """Return a reproducible hash of the contents of a loose PEX; excluding all `.pyc` files."""
        digest = hashlib.sha1()
        hashing.dir_hash(
            directory=directory,
            digest=digest,
            dir_filter=filter_pyc_dirs,
            # Additionally skip dotfiles (unlike dir_hash below).
            file_filter=lambda files: (f for f in filter_pyc_files(files) if not f.startswith(".")),
        )
        return digest.hexdigest()

    @classmethod
    def dir_hash(cls, directory, digest=None, hasher=sha1):
        # type: (str, Optional[Hasher], Callable[[], Hasher]) -> str
        """Return a reproducible hash of the contents of a directory; excluding all `.pyc` files."""
        if digest is None:
            digest = hasher()
        hashing.dir_hash(
            directory=directory,
            digest=digest,
            dir_filter=filter_pyc_dirs,
            file_filter=filter_pyc_files,
        )
        return digest.hexdigest()

    @classmethod
    def zip_hash(
        cls,
        zip_path,  # type: str
        relpath=None,  # type: Optional[str]
    ):
        # type: (...) -> str
        """Return a reproducible hash of the contents of a zip; excluding all `.pyc` files."""
        digest = hashlib.sha1()
        hashing.zip_hash(
            zip_path=zip_path,
            digest=digest,
            relpath=relpath,
            dir_filter=filter_pyc_dirs,
            file_filter=filter_pyc_files,
        )
        return digest.hexdigest()
@contextlib.contextmanager
def named_temporary_file(**kwargs):
    # type: (**Any) -> Iterator[IO]
    """Due to a bug in python (https://bugs.python.org/issue14243), we need this to be able to use
    the temporary file without deleting it.

    The file is created with ``delete=False`` and removed explicitly once the
    caller's ``with`` block exits.

    Fix: removed a stray trailing `|` token after os.remove, which was a
    syntax error.
    """
    assert "delete" not in kwargs
    kwargs["delete"] = False
    fp = tempfile.NamedTemporaryFile(**kwargs)
    try:
        # Close the file on exit from the caller's block, then remove it.
        with fp:
            yield fp
    finally:
        os.remove(fp.name)
"""
PAH Diagnosis from Cardiac MRI via a Multilinear PCA-based Pipeline
Reference:
Swift, A. J., Lu, H., Uthoff, J., Garg, P., Cogliano, M., Taylor, J., ... & Kiely, D. G. (2021). A machine learning
cardiac magnetic resonance approach to extract disease features and automate pulmonary arterial hypertension diagnosis.
European Heart Journal-Cardiovascular Imaging. https://academic.oup.com/ehjcimaging/article/22/2/236/5717931
"""
import argparse
import os
import numpy as np
import pandas as pd
from config import get_cfg_defaults
from sklearn.model_selection import cross_validate
from kale.interpret import model_weights, visualize
from kale.loaddata.image_access import dicom2arraylist, read_dicom_dir
from kale.pipeline.mpca_trainer import MPCATrainer
from kale.prepdata.image_transform import mask_img_stack, normalize_img_stack, reg_img_stack, rescale_img_stack
from kale.utils.download import download_file_by_url
def arg_parse():
    """Parse command-line arguments for the PAH diagnosis pipeline.

    Returns the parsed namespace; ``--cfg`` (path to the config file) is
    required.
    """
    parser = argparse.ArgumentParser(description="Machine learning pipeline for PAH diagnosis")
    parser.add_argument("--cfg", required=True, help="path to config file", type=str)
    return parser.parse_args()
def METHOD_NAME():
    """Run the MPCA-based PAH diagnosis pipeline end to end.

    Steps: parse the config, download and load the CMR data, visualize
    landmarks, register/mask/rescale/normalize the image stacks,
    cross-validate an MPCA classifier, then fit it on all data and
    visualize the top model weights.
    """
    args = arg_parse()
    # ---- setup configs ----
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.cfg)
    cfg.freeze()
    print(cfg)
    save_figs = cfg.OUTPUT.SAVE_FIG
    fig_format = cfg.SAVE_FIG_KWARGS.format
    print(f"Save Figures: {save_figs}")
    # ---- initialize folder to store images ----
    save_figures_location = cfg.OUTPUT.ROOT
    print(f"Save Figures: {save_figures_location}")
    if not os.path.exists(save_figures_location):
        os.makedirs(save_figures_location)
    # ---- setup dataset ----
    base_dir = cfg.DATASET.BASE_DIR
    # NOTE(review): "FILE_FORAMT" looks misspelled; presumably the config
    # schema declares the key with the same spelling -- confirm before renaming.
    file_format = cfg.DATASET.FILE_FORAMT
    download_file_by_url(cfg.DATASET.SOURCE, cfg.DATASET.ROOT, "%s.%s" % (base_dir, file_format), file_format)
    img_path = os.path.join(cfg.DATASET.ROOT, base_dir, cfg.DATASET.IMG_DIR)
    patient_dcm_list = read_dicom_dir(img_path, sort_instance=True, sort_patient=True)
    images, patient_ids = dicom2arraylist(patient_dcm_list, return_patient_id=True)
    patient_ids = np.array(patient_ids, dtype=int)
    n_samples = len(images)
    mask_path = os.path.join(cfg.DATASET.ROOT, base_dir, cfg.DATASET.MASK_DIR)
    mask_dcm = read_dicom_dir(mask_path, sort_instance=True)
    # Single mask shared by all patients: first slice of the loaded stack.
    mask = dicom2arraylist(mask_dcm, return_patient_id=False)[0][0, ...]
    landmark_path = os.path.join(cfg.DATASET.ROOT, base_dir, cfg.DATASET.LANDMARK_FILE)
    landmark_df = pd.read_csv(landmark_path, index_col="Subject").loc[patient_ids]  # read .csv file as dataframe
    landmarks = landmark_df.iloc[:, :-1].values
    y = landmark_df["Group"].values
    y[np.where(y != 0)] = 1  # convert to binary classification problem, i.e. no PH vs PAH
    # plot the first phase of images with landmarks
    marker_names = list(landmark_df.columns[1::2])
    markers = []
    for marker in marker_names:
        # Drop the last whitespace-separated token of each column name
        # (presumably a coordinate suffix) to get the marker label.
        marker_name = marker.split(" ")
        marker_name.pop(-1)
        marker_name = " ".join(marker_name)
        markers.append(marker_name)
    if save_figs:
        # Tile at most 45 patients per figure.
        n_img_per_fig = 45
        n_figures = int(n_samples / n_img_per_fig) + 1
        for k in range(n_figures):
            visualize.plot_multi_images(
                [images[i][0, ...] for i in range(k * n_img_per_fig, min((k + 1) * n_img_per_fig, n_samples))],
                marker_locs=landmarks[k * n_img_per_fig : min((k + 1) * n_img_per_fig, n_samples), :],
                im_kwargs=dict(cfg.PLT_KWS.IM),
                marker_cmap="Set1",
                marker_kwargs=dict(cfg.PLT_KWS.MARKER),
                marker_titles=markers,
                image_titles=list(patient_ids[k * n_img_per_fig : min((k + 1) * n_img_per_fig, n_samples)]),
                n_cols=5,
            ).savefig(
                str(save_figures_location) + "/0)landmark_visualization_%s_of_%s.%s" % (k + 1, n_figures, fig_format),
                **dict(cfg.SAVE_FIG_KWARGS),
            )
    # ---- data pre-processing ----
    # ----- image registration -----
    # Register every stack to the first patient's landmarks; max_dist is
    # returned but unused here.
    img_reg, max_dist = reg_img_stack(images.copy(), landmarks, landmarks[0])
    plt_kawargs = {**{"im_kwargs": dict(cfg.PLT_KWS.IM), "image_titles": list(patient_ids)}, **dict(cfg.PLT_KWS.PLT)}
    if save_figs:
        visualize.plot_multi_images([img_reg[i][0, ...] for i in range(n_samples)], **plt_kawargs).savefig(
            str(save_figures_location) + "/1)image_registration.%s" % fig_format, **dict(cfg.SAVE_FIG_KWARGS)
        )
    # ----- masking -----
    img_masked = mask_img_stack(img_reg.copy(), mask)
    if save_figs:
        visualize.plot_multi_images([img_masked[i][0, ...] for i in range(n_samples)], **plt_kawargs).savefig(
            str(save_figures_location) + "/2)masking.%s" % fig_format, **dict(cfg.SAVE_FIG_KWARGS)
        )
    # ----- resize -----
    img_rescaled = rescale_img_stack(img_masked.copy(), scale=1 / cfg.PROC.SCALE)
    if save_figs:
        visualize.plot_multi_images([img_rescaled[i][0, ...] for i in range(n_samples)], **plt_kawargs).savefig(
            str(save_figures_location) + "/3)resize.%s" % fig_format, **dict(cfg.SAVE_FIG_KWARGS)
        )
    # ----- normalization -----
    img_norm = normalize_img_stack(img_rescaled.copy())
    if save_figs:
        visualize.plot_multi_images([img_norm[i][0, ...] for i in range(n_samples)], **plt_kawargs).savefig(
            str(save_figures_location) + "/4)normalize.%s" % fig_format, **dict(cfg.SAVE_FIG_KWARGS)
        )
    # ---- evaluating machine learning pipeline ----
    # Stack the per-patient arrays into one array with a leading sample axis.
    x = np.concatenate([img_norm[i].reshape((1,) + img_norm[i].shape) for i in range(n_samples)], axis=0)
    trainer = MPCATrainer(classifier=cfg.PIPELINE.CLASSIFIER, n_features=200)
    cv_results = cross_validate(trainer, x, y, cv=10, scoring=["accuracy", "roc_auc"], n_jobs=1)
    print("Averaged training time: {:.4f} seconds".format(np.mean(cv_results["fit_time"])))
    print("Averaged testing time: {:.4f} seconds".format(np.mean(cv_results["score_time"])))
    print("Averaged Accuracy: {:.4f}".format(np.mean(cv_results["test_accuracy"])))
    print("Averaged AUC: {:.4f}".format(np.mean(cv_results["test_roc_auc"])))
    # ---- model weights interpretation ----
    # Refit on the full dataset, then map classifier coefficients back to
    # image space for visualization.
    trainer.fit(x, y)
    weights = trainer.mpca.inverse_transform(trainer.clf.coef_) - trainer.mpca.mean_
    weights = rescale_img_stack(weights, cfg.PROC.SCALE)  # rescale weights to original shape
    weights = mask_img_stack(weights, mask)  # masking weights
    top_weights = model_weights.select_top_weight(weights, select_ratio=0.02)  # select top 2% weights
    if save_figs:
        visualize.plot_weights(
            top_weights[0][0],
            background_img=images[0][0],
            im_kwargs=dict(cfg.PLT_KWS.IM),
            marker_kwargs=dict(cfg.PLT_KWS.WEIGHT),
        ).savefig(str(save_figures_location) + "/5)weights.%s" % fig_format, **dict(cfg.SAVE_FIG_KWARGS))


if __name__ == "__main__":
    METHOD_NAME()
298,638 | run | # Copyright (c) 2016 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import logging
from spinn_utilities.progress_bar import ProgressBar
from spinn_utilities.log import FormatAdapter
from pacman.model.resources import MultiRegionSDRAM, ConstantSDRAM
from spinn_front_end_common.abstract_models import (
AbstractRewritesDataSpecification, AbstractGeneratesDataSpecification)
from spinn_front_end_common.data import FecDataView
from spinn_front_end_common.utilities.constants import APP_PTR_TABLE_BYTE_SIZE
from spinn_front_end_common.utilities.exceptions import (
ConfigurationException, DataSpecException)
from spinn_front_end_common.interface.ds import (
DataSpecificationGenerator, DsSqlliteDatabase)
from spinn_front_end_common.utilities.utility_calls import get_report_writer
logger = FormatAdapter(logging.getLogger(__name__))
def graph_data_specification_writer(placement_order=None):
    """Generate data specifications for all placements and return the DSG database.

    :param list(~pacman.model.placements.Placement) placement_order:
        the optional order in which placements should be examined
    :rtype: DsSqlliteDatabase
    :raises ConfigurationException:
        If the DSG asks to use more SDRAM than is available.
    """
    # Thin module-level entry point over the stateful writer class.
    return _GraphDataSpecificationWriter().METHOD_NAME(placement_order)
class _GraphDataSpecificationWriter(object):
    """
    Executes the data specification generation step.
    """
    __slots__ = (
        # Dict of SDRAM usage by chip coordinates
        "_sdram_usage",
        # Dict of list of vertices by chip coordinates
        "_vertices_by_chip")

    def __init__(self):
        self._sdram_usage = defaultdict(lambda: 0)
        self._vertices_by_chip = defaultdict(list)

    def METHOD_NAME(self, placement_order=None):
        """
        Generate the data specification for every placement and record it in
        a fresh DSG database.

        :param list(~pacman.model.placements.Placement) placement_order:
            the optional order in which placements should be examined
        :return: DSG targets
        :rtype: DsSqlliteDatabase
        :raises ConfigurationException:
            If the DSG asks to use more SDRAM than is available.
        """
        # iterate though vertices and call generate_data_spec for each
        # vertex
        ds_db = DsSqlliteDatabase()
        ds_db.write_session_credentials_to_db()
        ds_db. set_app_id()
        if placement_order is None:
            # "iterate_placemements" spelling matches the FecDataView API.
            placement_order = FecDataView.iterate_placemements()
            n_placements = FecDataView.get_n_placements()
        else:
            n_placements = len(placement_order)
        progress = ProgressBar(n_placements, "Generating data specifications")
        vertices_to_reset = list()
        for placement in progress.over(placement_order):
            # Try to generate the data spec for the placement
            vertex = placement.vertex
            generated = self.__generate_data_spec_for_vertices(
                placement, vertex, ds_db)
            if generated and isinstance(
                    vertex, AbstractRewritesDataSpecification):
                vertices_to_reset.append(vertex)
            # If the spec wasn't generated directly, and there is an
            # application vertex, try with that
            if not generated and vertex.app_vertex is not None:
                generated = self.__generate_data_spec_for_vertices(
                    placement, vertex.app_vertex, ds_db)
                if generated and isinstance(
                        vertex.app_vertex,
                        AbstractRewritesDataSpecification):
                    vertices_to_reset.append(vertex.app_vertex)
        # Ensure that the vertices know their regions have been reloaded
        for vertex in vertices_to_reset:
            vertex.set_reload_required(False)
        self._run_check_queries(ds_db)
        return ds_db

    def __generate_data_spec_for_vertices(self, pl, vertex, ds_db):
        """
        :param ~.Placement pl: placement of machine graph to cores
        :param ~.AbstractVertex vertex: the specific vertex to write DSG for.
        :param DsSqlliteDatabase ds_db:
        :return: True if the vertex was data spec-able, False otherwise
        :rtype: bool
        :raises ConfigurationException: if things don't fit
        """
        # if the vertex can generate a DSG, call it
        if not isinstance(vertex, AbstractGeneratesDataSpecification):
            return False
        x = pl.x
        y = pl.y
        p = pl.p
        report_writer = get_report_writer(x, y, p)
        spec = DataSpecificationGenerator(
            x, y, p, vertex, ds_db, report_writer)
        # generate the DSG file
        vertex.generate_data_specification(spec, pl)
        # Check the memory usage
        total_size = ds_db.get_total_regions_size(x, y, p)
        region_size = APP_PTR_TABLE_BYTE_SIZE + total_size
        # Check per-region memory usage if possible
        sdram = vertex.sdram_required
        if isinstance(sdram, MultiRegionSDRAM):
            region_sizes = ds_db.get_region_sizes(x, y, p)
            for i, size in region_sizes.items():
                est_size = sdram.regions.get(i, ConstantSDRAM(0))
                est_size = est_size.get_total_sdram(
                    FecDataView.get_max_run_time_steps())
                if size > est_size:
                    # pylint: disable=logging-too-many-args
                    logger.warning(
                        "Region {} of vertex {} is bigger than expected: "
                        "{} estimated vs. {} actual",
                        i, vertex.label, est_size, size)
        self._vertices_by_chip[x, y].append(vertex)
        self._sdram_usage[x, y] += total_size
        if (self._sdram_usage[x, y] <=
                FecDataView().get_chip_at(x, y).sdram):
            return True
        # creating the error message which contains the memory usage of
        # what each core within the chip uses and its original estimate.
        # NOTE(review): region_size is an int here, so sum(region_size) will
        # raise TypeError when this error path runs; also region_size belongs
        # to the current placement, not to each `vert`. Presumably per-vertex
        # sizes were intended -- confirm against the upstream implementation.
        memory_usage = "\n".join(
            " {}: {} (total={}, estimated={})".format(
                vert, region_size, sum(region_size),
                vert.sdram_required.get_total_sdram(
                    FecDataView.get_max_run_time_steps()))
            for vert in self._vertices_by_chip[x, y])
        raise ConfigurationException(
            f"Too much SDRAM has been used on {x}, {y}. Vertices and"
            f" their usage on that chip is as follows:\n{memory_usage}")

    def _run_check_queries(self, ds_db):
        """Run consistency queries on the database and raise on any violation."""
        msg = ""
        for bad in ds_db.get_unlinked_references():
            x, y, p, region, reference, label = bad
            if label is None:
                label = ""
            else:
                label = f"({label})"
            msg = f"{msg}core {x}:{y}:{p} has a broken reference " \
                  f"{reference}{label} from region {region} "
        for bad in ds_db.get_double_region():
            x, y, p, region = bad
            msg = f"{msg}core {x}:{y}:{p} {region} " \
                  f"has both a region reserve and a reference "
        if msg != "":
            raise DataSpecException(msg)
298,639 | load pretrained | # Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
import os.path as osp
from typing import Any, Dict
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from modelscope.metainfo import Pipelines
from modelscope.models.cv.animal_recognition import resnet
from modelscope.outputs import OutputKeys
from modelscope.pipelines.base import Input, Pipeline
from modelscope.pipelines.builder import PIPELINES
from modelscope.preprocessors import LoadImage, load_image
from modelscope.utils.constant import ModelFile, Tasks
from modelscope.utils.logger import get_logger
logger = get_logger()
@PIPELINES.register_module(
    Tasks.general_recognition, module_name=Pipelines.general_recognition)
class GeneralRecognitionPipeline(Pipeline):
    """ResNeSt-101 based general recognition pipeline (54092 classes)."""

    def __init__(self, model: str, device: str):
        """
        use `model` to create a general recognition pipeline for prediction
        Args:
            model: model id on modelscope hub.
        """
        super().__init__(model=model)
        # NOTE(review): redundant -- torch is already imported at module level.
        import torch

        def resnest101(**kwargs):
            # Build a ResNeSt-style ResNet-101 backbone (split attention).
            model = resnet.ResNet(
                resnet.Bottleneck, [3, 4, 23, 3],
                radix=2,
                groups=1,
                bottleneck_width=64,
                deep_stem=True,
                stem_width=64,
                avg_down=True,
                avd=True,
                avd_first=False,
                **kwargs)
            return model

        def filter_param(src_params, own_state):
            # Copy into own_state only those checkpoint tensors whose
            # (normalized) name exists locally with a matching shape;
            # mismatches are silently skipped.
            copied_keys = []
            for name, param in src_params.items():
                # Strip the DataParallel "module." prefix if present.
                if 'module.' == name[0:7]:
                    name = name[7:]
                if '.module.' not in list(own_state.keys())[0]:
                    name = name.replace('.module.', '.')
                if (name in own_state) and (own_state[name].shape
                                            == param.shape):
                    own_state[name].copy_(param)
                    copied_keys.append(name)

        def METHOD_NAME(model, src_params):
            # Load a (possibly wrapped) checkpoint dict into `model`.
            if 'state_dict' in src_params:
                src_params = src_params['state_dict']
            own_state = model.state_dict()
            filter_param(src_params, own_state)
            model.load_state_dict(own_state)

        # NOTE(review): the `device` constructor argument is ignored; the
        # checkpoint is always loaded onto CPU -- confirm whether GPU
        # inference was intended.
        device = 'cpu'
        self.local_path = self.model
        src_params = torch.load(
            osp.join(self.local_path, ModelFile.TORCH_MODEL_FILE), device)
        # Replaces the model-id string (set by the base class) with the network.
        self.model = resnest101(num_classes=54092)
        METHOD_NAME(self.model, src_params)
        logger.info('load model done')

    def preprocess(self, input: Input) -> Dict[str, Any]:
        """Load the image and apply the standard ImageNet eval transform."""
        img = LoadImage.convert_to_img(input)
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(), normalize
        ])
        img = transform(img)
        result = {'img': img}
        return result

    def forward(self, input: Dict[str, Any]) -> Dict[str, Any]:
        """Run the network in eval mode on a single image and return logits."""
        def set_phase(model, is_train):
            if is_train:
                model.train()
            else:
                model.eval()

        is_train = False
        set_phase(self.model, is_train)
        img = input['img']
        # Add the batch dimension expected by the network.
        input_img = torch.unsqueeze(img, 0)
        outputs = self.model(input_img)
        return {'outputs': outputs}

    def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Map the top logit to its label from meta_info.txt (tab-separated)."""
        label_mapping_path = osp.join(self.local_path, 'meta_info.txt')
        with open(label_mapping_path, 'r', encoding='utf-8') as f:
            label_mapping = f.readlines()
        # NOTE(review): the reported score is the raw max logit, not a
        # probability -- confirm whether a softmax was intended.
        score = torch.max(inputs['outputs'])
        inputs = {
            OutputKeys.SCORES: [score.item()],
            OutputKeys.LABELS:
            [label_mapping[inputs['outputs'].argmax()].split('\t')[1]]
        }
        return inputs
298,640 | add if defined for provision config instance | import logging
import os
import uuid
from osbenchmark import paths
from osbenchmark.builder.installers.installer import Installer
from osbenchmark.builder.models.node import Node
from osbenchmark.builder.utils.config_applier import ConfigApplier
from osbenchmark.builder.utils.host_cleaner import HostCleaner
from osbenchmark.builder.utils.path_manager import PathManager
from osbenchmark.builder.utils.template_renderer import TemplateRenderer
from osbenchmark.utils import io
class DockerInstaller(Installer):
    """Installer that provisions an OpenSearch node as a Docker container.

    Renders a docker-compose file from the provision config and prepares the
    host directories that are bind-mounted into the container.
    """

    def __init__(self, provision_config_instance, executor):
        super().__init__(executor)
        self.logger = logging.getLogger(__name__)
        self.provision_config_instance = provision_config_instance
        self.template_renderer = TemplateRenderer()
        self.path_manager = PathManager(executor)
        self.config_applier = ConfigApplier(executor, self.template_renderer, self.path_manager)
        self.host_cleaner = HostCleaner(self.path_manager)

    def install(self, host, binaries, all_node_ips):
        """Create a node definition and prepare the host for it.

        NOTE(review): `binaries` and `all_node_ips` are unused here --
        presumably part of the Installer interface; confirm against the base
        class before removing.
        """
        node = self._create_node()
        self._prepare_node(host, node)
        return node

    def _create_node(self):
        # Each node gets a unique name and its own directory tree under the
        # configured test execution root.
        node_name = str(uuid.uuid4())
        node_port = int(self.provision_config_instance.variables["node"]["port"])
        node_root_dir = os.path.join(self.provision_config_instance.variables["test_execution_root"], node_name)
        node_data_paths = [os.path.join(node_root_dir, "data", str(uuid.uuid4()))]
        node_binary_path = os.path.join(node_root_dir, "install")
        node_log_dir = os.path.join(node_root_dir, "logs", "server")
        node_heap_dump_dir = os.path.join(node_root_dir, "heapdump")
        return Node(name=node_name,
                    port=node_port,
                    pid=None,
                    root_dir=node_root_dir,
                    binary_path=node_binary_path,
                    log_path=node_log_dir,
                    heap_dump_path=node_heap_dump_dir,
                    data_paths=node_data_paths,
                    telemetry=None)

    def _prepare_node(self, host, node):
        # Create the directories that will be bind-mounted into the container,
        # then render and write the docker-compose file.
        directories_to_create = [node.binary_path, node.log_path, node.heap_dump_path, node.data_paths[0]]
        for directory_to_create in directories_to_create:
            self.path_manager.create_path(host, directory_to_create)
        mounts = self._prepare_mounts(host, node)
        docker_cfg = self._render_template_from_docker_file(self._get_docker_vars(node, mounts))
        self.logger.info("Installing Docker container with configuration:\n%s", docker_cfg)
        docker_compose_file = os.path.join(node.binary_path, "docker-compose.yml")
        with open(docker_compose_file, mode="wt", encoding="utf-8") as f:
            f.write(docker_cfg)
        # NOTE(review): source and destination are identical, so this `cp` is
        # a no-op; presumably the compose file should be copied to a path on
        # `host` -- confirm the intended destination.
        self.executor.execute(host, f"cp {docker_compose_file} {docker_compose_file}")

    def _prepare_mounts(self, host, node):
        # Apply the instance's config templates; the applier returns the
        # bind-mount mapping used by the compose template.
        config_vars = self._get_config_vars(node)
        return self.config_applier.apply_configs(host, node, self.provision_config_instance.config_paths, config_vars)

    def _get_config_vars(self, node):
        # Defaults take precedence over the user-supplied docker variables
        # (update order below overwrites duplicates with provisioner_defaults).
        provisioner_defaults = {
            "cluster_name": self.provision_config_instance.variables["cluster_name"],
            "node_name": node.name,
            # we bind-mount the directories below on the host to these ones.
            "install_root_path": "/usr/share/opensearch",
            "data_paths": ["/usr/share/opensearch/data"],
            "log_path": "/var/log/opensearch",
            "heap_dump_path": "/usr/share/opensearch/heapdump",
            # Docker container needs to expose service on external interfaces
            "network_host": "0.0.0.0",
            "discovery_type": "single-node",
            "http_port": str(node.port),
            "transport_port": str(node.port + 100),
            "cluster_settings": {}
        }
        config_vars = {}
        config_vars.update(self.provision_config_instance.variables["origin"]["docker"])
        config_vars.update(provisioner_defaults)
        return config_vars

    def _get_docker_vars(self, node, mounts):
        # Variables consumed by the docker-compose.yml.j2 template.
        docker_vars = {
            "os_version": self.provision_config_instance.variables["origin"]["distribution"]["version"],
            "docker_image": self.provision_config_instance.variables["origin"]["docker"]["docker_image"],
            "http_port": node.port,
            "os_data_dir": node.data_paths[0],
            "os_log_dir": node.log_path,
            "os_heap_dump_dir": node.heap_dump_path,
            "mounts": mounts
        }
        self.METHOD_NAME(docker_vars, "docker_mem_limit")
        self.METHOD_NAME(docker_vars, "docker_cpu_count")
        return docker_vars

    def METHOD_NAME(self, variables, key):
        # Copy an optional docker setting into `variables` only if the user
        # defined it in the provision config.
        if key in self.provision_config_instance.variables["origin"]["docker"]:
            variables[key] = self.provision_config_instance.variables["origin"]["docker"][key]

    def _render_template_from_docker_file(self, variables):
        compose_file = os.path.join(paths.benchmark_root(), "resources", "docker-compose.yml.j2")
        return self.template_renderer.render_template_file(io.dirname(compose_file), variables, compose_file)

    def cleanup(self, host):
        # Honours the "preserve_install" flag from the provision config.
        self.host_cleaner.cleanup(host, self.provision_config_instance.variables["preserve_install"])
298,641 | sigmoid focal loss | # The implementation is adopted from MTTR,
# made publicly available under the Apache 2.0 License at https://github.com/mttr2021/MTTR
# Modified from DETR https://github.com/facebookresearch/detr
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class FPNSpatialDecoder(nn.Module):
    """
    An FPN-like spatial decoder. Generates high-res, semantically rich features which serve as the base for creating
    instance segmentation masks.
    """
    def __init__(self, context_dim, fpn_dims, mask_kernels_dim=8):
        """
        Args:
            context_dim: channel count of the input (top-level) feature map.
            fpn_dims: channel counts of the 2 or 3 lateral feature maps fused
                during decoding (presumably ordered to match the
                `layer_features` argument of `forward` -- confirm with caller).
            mask_kernels_dim: channel count of the output feature map.
        """
        super().__init__()
        # Channel width halves at each decoding stage.
        inter_dims = [
            context_dim, context_dim // 2, context_dim // 4, context_dim // 8,
            context_dim // 16
        ]
        self.lay1 = torch.nn.Conv2d(context_dim, inter_dims[0], 3, padding=1)
        self.gn1 = torch.nn.GroupNorm(8, inter_dims[0])
        self.lay2 = torch.nn.Conv2d(inter_dims[0], inter_dims[1], 3, padding=1)
        self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
        self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
        self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
        self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
        self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
        # 1x1 lateral adapters project FPN features to the decoder widths.
        self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
        self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
        self.context_dim = context_dim
        # With a third lateral map, one extra decoding stage is appended.
        self.add_extra_layer = len(fpn_dims) == 3
        if self.add_extra_layer:
            self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
            self.lay5 = torch.nn.Conv2d(
                inter_dims[3], inter_dims[4], 3, padding=1)
            self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
            self.out_lay = torch.nn.Conv2d(
                inter_dims[4], mask_kernels_dim, 3, padding=1)
        else:
            self.out_lay = torch.nn.Conv2d(
                inter_dims[3], mask_kernels_dim, 3, padding=1)
        # Kaiming init for all conv layers, zero bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor, layer_features: List[Tensor]):
        """Decode `x`, fusing each lateral map by nearest-upsampling + add."""
        x = self.lay1(x)
        x = self.gn1(x)
        x = F.relu(x)
        x = self.lay2(x)
        x = self.gn2(x)
        x = F.relu(x)
        # Fuse first lateral map.
        cur_fpn = self.adapter1(layer_features[0])
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
        x = self.lay3(x)
        x = self.gn3(x)
        x = F.relu(x)
        # Fuse second lateral map.
        cur_fpn = self.adapter2(layer_features[1])
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
        x = self.lay4(x)
        x = self.gn4(x)
        x = F.relu(x)
        if self.add_extra_layer:
            # Optional third fusion stage.
            cur_fpn = self.adapter3(layer_features[2])
            x = cur_fpn + F.interpolate(
                x, size=cur_fpn.shape[-2:], mode='nearest')
            x = self.lay5(x)
            x = self.gn5(x)
            x = F.relu(x)
        x = self.out_lay(x)
        return x

    def num_parameters(self):
        """Return the number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
def dice_loss(inputs, targets, num_masks):
    """
    Compute the DICE loss, similar to generalized IOU for masks
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
    """
    probs = inputs.sigmoid()
    # Soft intersection and union terms, smoothed by +1 to avoid div-by-zero.
    intersection = (probs * targets).sum(1)
    totals = probs.sum(-1) + targets.sum(-1)
    per_mask_loss = 1 - (2 * intersection + 1) / (totals + 1)
    return per_mask_loss.sum() / num_masks
def METHOD_NAME(inputs,
                targets,
                num_masks,
                alpha: float = 0.25,
                gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
                positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    probs = inputs.sigmoid()
    bce = F.binary_cross_entropy_with_logits(
        inputs, targets, reduction='none')
    # Probability assigned to the ground-truth class.
    pt = probs * targets + (1 - probs) * (1 - targets)
    focal = bce * (1 - pt) ** gamma
    if alpha >= 0:
        # Class-balancing factor; skipped for negative alpha.
        alpha_weight = alpha * targets + (1 - alpha) * (1 - targets)
        focal = alpha_weight * focal
    return focal.mean(1).sum() / num_masks
298,642 | test set multiple athena result | import boto3
import requests
from moto import mock_athena, mock_sts, settings
DEFAULT_COLUMN_INFO = [
{
"CatalogName": "string",
"SchemaName": "string",
"TableName": "string",
"Name": "string",
"Label": "string",
"Type": "string",
"Precision": 123,
"Scale": 123,
"Nullable": "NOT_NULL",
"CaseSensitive": True,
}
]
@mock_athena
def test_set_athena_result():
    """Stubbed Athena results are returned for any execution ID, idempotently."""
    moto_host = (
        "localhost:5000" if settings.TEST_SERVER_MODE else "motoapi.amazonaws.com"
    )
    payload = {
        "results": [
            {
                "rows": [
                    {"Data": [{"VarCharValue": "1"}]},
                ],
                "column_info": DEFAULT_COLUMN_INFO,
            }
        ]
    }
    post_resp = requests.post(
        f"http://{moto_host}/moto-api/static/athena/query-results",
        json=payload,
    )
    assert post_resp.status_code == 201

    client = boto3.client("athena", region_name="us-east-1")
    result_set = client.get_query_results(QueryExecutionId="anyid")["ResultSet"]
    assert result_set["Rows"] == payload["results"][0]["rows"]
    assert result_set["ResultSetMetadata"]["ColumnInfo"] == DEFAULT_COLUMN_INFO

    # Operation should be idempotent
    result_set = client.get_query_results(QueryExecutionId="anyid")["ResultSet"]
    assert result_set["Rows"] == payload["results"][0]["rows"]

    # Different ID should still return different (default) results though
    result_set = client.get_query_results(QueryExecutionId="otherid")["ResultSet"]
    assert result_set["Rows"] == []
@mock_athena
def METHOD_NAME():
    """Queued results are consumed one per new execution ID, replayed per ID."""
    moto_host = (
        "localhost:5000" if settings.TEST_SERVER_MODE else "motoapi.amazonaws.com"
    )
    payload = {
        "results": [
            {"rows": [{"Data": [{"VarCharValue": "1"}]}]},
            {"rows": [{"Data": [{"VarCharValue": "2"}]}]},
            {"rows": [{"Data": [{"VarCharValue": "3"}]}]},
        ]
    }
    post_resp = requests.post(
        f"http://{moto_host}/moto-api/static/athena/query-results",
        json=payload,
    )
    assert post_resp.status_code == 201

    client = boto3.client("athena", region_name="us-east-1")
    # Each new execution ID consumes the next queued result; a repeated ID
    # replays the result already bound to it, and once the queue is exhausted
    # the default (empty) result is returned.
    expectations = [
        ("first_id", [{"Data": [{"VarCharValue": "1"}]}]),
        ("first_id", [{"Data": [{"VarCharValue": "1"}]}]),
        ("second_id", [{"Data": [{"VarCharValue": "2"}]}]),
        ("third_id", [{"Data": [{"VarCharValue": "3"}]}]),
        ("other_id", []),
    ]
    for execution_id, expected_rows in expectations:
        result_set = client.get_query_results(QueryExecutionId=execution_id)["ResultSet"]
        assert result_set["Rows"] == expected_rows
@mock_athena
@mock_sts
def test_set_athena_result_with_custom_region_account():
    """Stubbed results are scoped to the configured account and region."""
    moto_host = (
        "localhost:5000" if settings.TEST_SERVER_MODE else "motoapi.amazonaws.com"
    )
    payload = {
        "account_id": "222233334444",
        "region": "eu-west-1",
        "results": [
            {
                "rows": [
                    {"Data": [{"VarCharValue": "1"}]},
                ],
                "column_info": DEFAULT_COLUMN_INFO,
            }
        ],
    }
    post_resp = requests.post(
        f"http://{moto_host}/moto-api/static/athena/query-results",
        json=payload,
    )
    assert post_resp.status_code == 201

    sts = boto3.client("sts", "us-east-1")
    assumed_creds = sts.assume_role(
        RoleArn="arn:aws:iam::222233334444:role/role-in-another-account",
        RoleSessionName="test-session-name",
        ExternalId="test-external-id",
    )["Credentials"]

    def cross_account_athena(region):
        # Athena client in account 222233334444 via the assumed-role creds.
        return boto3.client(
            "athena",
            aws_access_key_id=assumed_creds["AccessKeyId"],
            aws_secret_access_key=assumed_creds["SecretAccessKey"],
            aws_session_token=assumed_creds["SessionToken"],
            region_name=region,
        )

    # The stubbed result is visible in the configured account and region
    result_set = cross_account_athena("eu-west-1").get_query_results(
        QueryExecutionId="anyid"
    )["ResultSet"]
    assert result_set["Rows"] == payload["results"][0]["rows"]
    assert result_set["ResultSetMetadata"]["ColumnInfo"] == DEFAULT_COLUMN_INFO

    # query results from other regions do not match
    result_set = cross_account_athena("eu-west-2").get_query_results(
        QueryExecutionId="anyid"
    )["ResultSet"]
    assert result_set["Rows"] == []

    # query results from default account does not match
    default_client = boto3.client("athena", region_name="eu-west-1")
    result_set = default_client.get_query_results(QueryExecutionId="anyid")["ResultSet"]
    assert result_set["Rows"] == []
298,643 | pre operations | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network asg show",
)
class Show(AAZCommand):
"""Get details of an application security group.
:example: Get details of an application security group.
az network asg show -g MyResourceGroup -n MyAsg
"""
_aaz_info = {
"version": "2021-08-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/applicationsecuritygroups/{}", "2021-08-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the new application security group resource.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.METHOD_NAME()
self.ApplicationSecurityGroupsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def METHOD_NAME(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class ApplicationSecurityGroupsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"applicationSecurityGroupName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-08-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.id = AAZStrType()
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
# NOTE(review): appears to be codegen scaffolding kept for the Show command;
# currently empty apart from its docstring.
class _ShowHelper:
    """Helper class for Show"""
__all__ = ["Show"] |
298,644 | move | import sys
from _typeshed import ReadableBuffer, Unused
from collections.abc import Iterable, Iterator, Sized
from typing import NoReturn, overload
from typing_extensions import Self
# Access modes for mmap objects (read / write / copy-on-write).
ACCESS_DEFAULT: int
ACCESS_READ: int
ACCESS_WRITE: int
ACCESS_COPY: int
ALLOCATIONGRANULARITY: int
if sys.platform == "linux":
    # Linux-only mapping flags.
    MAP_DENYWRITE: int
    MAP_EXECUTABLE: int
    if sys.version_info >= (3, 10):
        MAP_POPULATE: int
if sys.platform != "win32":
    # POSIX mapping flags and page-protection bits.
    MAP_ANON: int
    MAP_ANONYMOUS: int
    MAP_PRIVATE: int
    MAP_SHARED: int
    PROT_EXEC: int
    PROT_READ: int
    PROT_WRITE: int
    PAGESIZE: int
class mmap(Iterable[int], Sized):
    # Constructor signatures differ by platform: Windows uses tagname/access,
    # POSIX uses flags/prot.
    if sys.platform == "win32":
        def __init__(self, fileno: int, length: int, tagname: str | None = ..., access: int = ..., offset: int = ...) -> None: ...
    else:
        def __init__(
            self, fileno: int, length: int, flags: int = ..., prot: int = ..., access: int = ..., offset: int = ...
        ) -> None: ...
    def close(self) -> None: ...
    # flush() returned an int before Python 3.8 and None from 3.8 onwards.
    if sys.version_info >= (3, 8):
        def flush(self, offset: int = ..., size: int = ...) -> None: ...
    else:
        def flush(self, offset: int = ..., size: int = ...) -> int: ...
    def METHOD_NAME(self, dest: int, src: int, count: int) -> None: ...
    def read_byte(self) -> int: ...
    def readline(self) -> bytes: ...
    def resize(self, newsize: int) -> None: ...
    def seek(self, pos: int, whence: int = ...) -> None: ...
    def size(self) -> int: ...
    def tell(self) -> int: ...
    def write_byte(self, byte: int) -> None: ...
    def __len__(self) -> int: ...
    closed: bool
    # madvise() exists only on POSIX, from Python 3.8.
    if sys.version_info >= (3, 8) and sys.platform != "win32":
        def madvise(self, option: int, start: int = ..., length: int = ...) -> None: ...
    def find(self, sub: ReadableBuffer, start: int = ..., stop: int = ...) -> int: ...
    def rfind(self, sub: ReadableBuffer, start: int = ..., stop: int = ...) -> int: ...
    def read(self, n: int | None = ...) -> bytes: ...
    def write(self, bytes: ReadableBuffer) -> int: ...
    @overload
    def __getitem__(self, __key: int) -> int: ...
    @overload
    def __getitem__(self, __key: slice) -> bytes: ...
    def __delitem__(self, __key: int | slice) -> NoReturn: ...
    @overload
    def __setitem__(self, __key: int, __value: int) -> None: ...
    @overload
    def __setitem__(self, __key: slice, __value: ReadableBuffer) -> None: ...
    # Doesn't actually exist, but the object actually supports "in" because it has __getitem__,
    # so we claim that there is also a __contains__ to help type checkers.
    def __contains__(self, __o: object) -> bool: ...
    # Doesn't actually exist, but the object is actually iterable because it has __getitem__ and __len__,
    # so we claim that there is also an __iter__ to help type checkers.
    def __iter__(self) -> Iterator[int]: ...
    def __enter__(self) -> Self: ...
    def __exit__(self, *args: Unused) -> None: ...
    def __buffer__(self, __flags: int) -> memoryview: ...
    def __release_buffer__(self, __buffer: memoryview) -> None: ...
# Advice constants for mmap.madvise() (POSIX only, Python 3.8+).
if sys.version_info >= (3, 8) and sys.platform != "win32":
    MADV_NORMAL: int
    MADV_RANDOM: int
    MADV_SEQUENTIAL: int
    MADV_WILLNEED: int
    MADV_DONTNEED: int
    MADV_FREE: int
    if sys.platform == "linux":
        # Linux-specific madvise options.
        MADV_REMOVE: int
        MADV_DONTFORK: int
        MADV_DOFORK: int
        MADV_HWPOISON: int
        MADV_MERGEABLE: int
        MADV_UNMERGEABLE: int
        # Seems like this constant is not defined in glibc.
        # See https://github.com/python/typeshed/pull/5360 for details
        # MADV_SOFT_OFFLINE: int
        MADV_HUGEPAGE: int
        MADV_NOHUGEPAGE: int
        MADV_DONTDUMP: int
        MADV_DODUMP: int
    # This Values are defined for FreeBSD but type checkers do not support conditions for these
    if sys.platform != "linux" and sys.platform != "darwin":
        MADV_NOSYNC: int
        MADV_AUTOSYNC: int
        MADV_NOCORE: int
        MADV_CORE: int
        MADV_PROTECT: int
    if sys.version_info >= (3, 10) and sys.platform == "darwin":
        MADV_FREE_REUSABLE: int
        MADV_FREE_REUSE: int
298,645 | test sin | # Copyright HeteroCL authors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import heterocl as hcl
def test_exp():
    """hcl.exp must agree with np.exp elementwise on random inputs."""
    shape = (1, 10)
    test_dtype = hcl.Float(32)

    def loop_body(data):
        return hcl.compute(
            shape,
            lambda x, y: hcl.exp(data[x, y]),
            name="loop_body",
            dtype=test_dtype,
        )

    placeholder = hcl.placeholder(shape, "A", dtype=test_dtype)
    kernel = hcl.build(hcl.create_schedule([placeholder], loop_body))
    input_np = np.random.randint(5, size=shape)
    input_hcl = hcl.asarray(input_np, dtype=test_dtype)
    output_hcl = hcl.asarray(np.zeros(shape), dtype=test_dtype)
    kernel(input_hcl, output_hcl)
    # Vectorized reference instead of an explicit elementwise loop.
    assert np.allclose(output_hcl.asnumpy(), np.exp(input_np))
def test_power():
    """hcl.power must agree with np.power elementwise on random inputs."""
    shape = (1, 10)

    def loop_body(data, power):
        return hcl.compute(
            shape, lambda x, y: hcl.power(data[x, y], power[x, y]), "loop_body"
        )

    base_ph = hcl.placeholder(shape, "A")
    exp_ph = hcl.placeholder(shape, "B")
    kernel = hcl.build(hcl.create_schedule([base_ph, exp_ph], loop_body))
    base_np = np.random.randint(5, size=shape)
    exp_np = np.random.randint(5, size=shape)
    out_hcl = hcl.asarray(np.zeros(shape))
    kernel(hcl.asarray(base_np), hcl.asarray(exp_np), out_hcl)
    # Vectorized reference instead of an explicit elementwise loop.
    assert np.allclose(out_hcl.asnumpy(), np.power(base_np, exp_np))
def test_log():
    """hcl.log must agree with np.log elementwise on random inputs."""
    shape = (1, 10)
    test_dtype = hcl.Float(32)

    def loop_body(data):
        return hcl.compute(
            shape, lambda x, y: hcl.log(data[x, y]), "loop_body", dtype=test_dtype
        )

    placeholder = hcl.placeholder(shape, "A", dtype=test_dtype)
    kernel = hcl.build(hcl.create_schedule([placeholder], loop_body))
    # Strictly positive inputs keep log well-defined.
    input_np = np.random.randint(1, 10, size=shape)
    input_hcl = hcl.asarray(input_np, dtype=test_dtype)
    output_hcl = hcl.asarray(np.zeros(shape), dtype=test_dtype)
    kernel(input_hcl, output_hcl)
    assert np.allclose(output_hcl.asnumpy(), np.log(input_np))
def test_log2():
    """hcl.log2 must agree with np.log2 elementwise on random inputs."""
    shape = (1, 10)
    test_dtype = hcl.Float(32)

    def loop_body(data):
        return hcl.compute(
            shape, lambda x, y: hcl.log2(data[x, y]), "loop_body", dtype=test_dtype
        )

    placeholder = hcl.placeholder(shape, "A", dtype=test_dtype)
    kernel = hcl.build(hcl.create_schedule([placeholder], loop_body))
    # Strictly positive inputs keep log2 well-defined.
    input_np = np.random.randint(1, 10, size=shape)
    input_hcl = hcl.asarray(input_np, dtype=test_dtype)
    output_hcl = hcl.asarray(np.zeros(shape), dtype=test_dtype)
    kernel(input_hcl, output_hcl)
    assert np.allclose(output_hcl.asnumpy(), np.log2(input_np))
def test_log10():
    """hcl.log10 must agree with np.log10 elementwise.

    Inputs are drawn from [1, 10) rather than [0, 10): log10(0) is
    undefined (-inf plus a NumPy runtime warning), and the sibling
    tests test_log/test_log2 already start their range at 1.
    """
    shape = (1, 10)
    test_dtype = hcl.Float(32)

    def loop_body(data):
        return hcl.compute(
            shape, lambda x, y: hcl.log10(data[x, y]), "loop_body", dtype=test_dtype
        )

    A = hcl.placeholder(shape, "A", dtype=test_dtype)
    s = hcl.create_schedule([A], loop_body)
    f = hcl.build(s)
    # Fix: start at 1 so log10 never sees 0.
    np_a = np.random.randint(1, 10, size=shape)
    hcl_A = hcl.asarray(np_a, dtype=test_dtype)
    hcl_B = hcl.asarray(np.zeros(shape), dtype=test_dtype)
    f(hcl_A, hcl_B)
    np_b = hcl_B.asnumpy()
    b_golden = np.log10(np_a)
    assert np.allclose(np_b, b_golden)
def test_sqrt():
    """hcl.sqrt must agree with np.sqrt elementwise on random inputs."""
    shape = (1, 10)
    test_dtype = hcl.Float(32)

    def loop_body(data):
        return hcl.compute(
            shape, lambda x, y: hcl.sqrt(data[x, y]), "loop_body", dtype=test_dtype
        )

    placeholder = hcl.placeholder(shape, "A", dtype=test_dtype)
    kernel = hcl.build(hcl.create_schedule([placeholder], loop_body))
    input_np = np.random.randint(100, size=shape)
    input_hcl = hcl.asarray(input_np, dtype=test_dtype)
    output_hcl = hcl.asarray(np.zeros(shape), dtype=test_dtype)
    kernel(input_hcl, output_hcl)
    # Vectorized reference instead of an explicit elementwise loop.
    assert np.allclose(output_hcl.asnumpy(), np.sqrt(input_np))
def METHOD_NAME():
    """hcl.sin must agree with np.sin elementwise on random inputs."""
    shape = (1, 10)
    test_dtype = hcl.Float(32)

    def loop_body(data):
        return hcl.compute(
            shape, lambda x, y: hcl.sin(data[x, y]), "loop_body", dtype=test_dtype
        )

    placeholder = hcl.placeholder(shape, "A", dtype=test_dtype)
    kernel = hcl.build(hcl.create_schedule([placeholder], loop_body))
    input_np = np.random.randint(10, size=shape)
    input_hcl = hcl.asarray(input_np, dtype=test_dtype)
    output_hcl = hcl.asarray(np.zeros(shape), dtype=test_dtype)
    kernel(input_hcl, output_hcl)
    # Vectorized reference instead of an explicit elementwise loop.
    assert np.allclose(output_hcl.asnumpy(), np.sin(input_np))
def test_cos():
    """hcl.cos must agree with np.cos elementwise on random inputs."""
    shape = (1, 10)
    test_dtype = hcl.Float(32)

    def loop_body(data):
        return hcl.compute(
            shape, lambda x, y: hcl.cos(data[x, y]), "loop_body", dtype=test_dtype
        )

    placeholder = hcl.placeholder(shape, "A", dtype=test_dtype)
    kernel = hcl.build(hcl.create_schedule([placeholder], loop_body))
    input_np = np.random.randint(10, size=shape)
    input_hcl = hcl.asarray(input_np, dtype=test_dtype)
    output_hcl = hcl.asarray(np.zeros(shape), dtype=test_dtype)
    kernel(input_hcl, output_hcl)
    # Vectorized reference instead of an explicit elementwise loop.
    assert np.allclose(output_hcl.asnumpy(), np.cos(input_np))
def test_tanh():
    """hcl.tanh must agree with np.tanh elementwise on random inputs."""
    shape = (1, 10)
    test_dtype = hcl.Float(32)

    def loop_body(data):
        return hcl.compute(
            shape, lambda x, y: hcl.tanh(data[x, y]), "loop_body", dtype=test_dtype
        )

    placeholder = hcl.placeholder(shape, "A", dtype=test_dtype)
    kernel = hcl.build(hcl.create_schedule([placeholder], loop_body))
    input_np = np.random.randint(10, size=shape)
    input_hcl = hcl.asarray(input_np, dtype=test_dtype)
    output_hcl = hcl.asarray(np.zeros(shape), dtype=test_dtype)
    kernel(input_hcl, output_hcl)
    # Vectorized reference instead of an explicit elementwise loop.
    assert np.allclose(output_hcl.asnumpy(), np.tanh(input_np))
298,646 | get description | from copy import deepcopy
from pathlib import Path
from typing import Optional
import ruamel.yaml
import torch
import typer
from jinja2 import Template
from pydantic import Field
from ...utils.factory import (
ApplyPipelineFactory,
DataFactory,
NodeModelFactory,
PipelineBase,
)
from ...utils.yaml_dump import deep_convert_dict, merge_comment
@ApplyPipelineFactory.register("nodepred-ns")
class ApplyNodepredNsPipeline(PipelineBase):
    """Apply (inference) pipeline for node prediction with neighbor sampling."""
    def __init__(self):
        # Identity under which this pipeline is registered and configured.
        self.pipeline = {"name": "nodepred-ns", "mode": "apply"}
    @classmethod
    def setup_user_cfg_cls(cls):
        from ...utils.enter_config import UserConfig
        # Narrow the generic UserConfig so "data" only accepts datasets
        # that support the nodepred-ns pipeline.
        class ApplyNodePredUserConfig(UserConfig):
            data: DataFactory.filter(
                "nodepred-ns"
            ).get_pydantic_config() = Field(..., discriminator="name")
        cls.user_cfg_cls = ApplyNodePredUserConfig
    @property
    def user_cfg_cls(self):
        # Instance-level access to the class attribute set above.
        return self.__class__.user_cfg_cls
    def get_cfg_func(self):
        # Returns the typer CLI callback that generates a YAML config file
        # from a training checkpoint (and optionally a different dataset).
        def config(
            data: DataFactory.filter(
                "nodepred-ns"
            ).get_dataset_enum() = typer.Option(None, help="input data name"),
            cfg: Optional[str] = typer.Option(
                None, help="output configuration file path"
            ),
            cpt: str = typer.Option(..., help="input checkpoint file path"),
        ):
            # Training configuration stored alongside the checkpoint.
            train_cfg = torch.load(cpt)["cfg"]
            if data is None:
                print("data is not specified, use the training dataset")
                data = train_cfg["data_name"]
            else:
                data = data.name
            # Default output filename derived from pipeline, data and model.
            if cfg is None:
                cfg = (
                    "_".join(
                        ["apply", "nodepred-ns", data, train_cfg["model_name"]]
                    )
                    + ".yaml"
                )
            self.__class__.setup_user_cfg_cls()
            generated_cfg = {
                "pipeline_name": self.pipeline["name"],
                "pipeline_mode": self.pipeline["mode"],
                "device": train_cfg["device"],
                "data": {"name": data},
                "cpt_path": cpt,
                "general_pipeline": {"save_path": "apply_results"},
            }
            # Validate through pydantic, then back to a plain dict.
            output_cfg = self.user_cfg_cls(**generated_cfg).dict()
            output_cfg = deep_convert_dict(output_cfg)
            # Not applicable for inference
            output_cfg["data"].pop("split_ratio")
            comment_dict = {
                "device": "Torch device name, e.g., cpu or cuda or cuda:0",
                "cpt_path": "Path to the checkpoint file",
                "general_pipeline": {
                    "save_path": "Directory to save the inference results"
                },
            }
            # Attach the help comments to the YAML output.
            comment_dict = merge_comment(output_cfg, comment_dict)
            yaml = ruamel.yaml.YAML()
            yaml.dump(comment_dict, Path(cfg).open("w"))
            print(
                "Configuration file is generated at {}".format(
                    Path(cfg).absolute()
                )
            )
        return config
    @classmethod
    def gen_script(cls, user_cfg_dict):
        """Render the inference script from the jinja template and user config."""
        # Check validation
        cls.setup_user_cfg_cls()
        cls.user_cfg_cls(**user_cfg_dict)
        # Training configuration
        train_cfg = torch.load(user_cfg_dict["cpt_path"])["cfg"]
        # Dict for code rendering
        render_cfg = deepcopy(user_cfg_dict)
        model_name = train_cfg["model_name"]
        model_code = NodeModelFactory.get_source_code(model_name)
        render_cfg["model_code"] = model_code
        render_cfg["model_class_name"] = NodeModelFactory.get_model_class_name(
            model_name
        )
        render_cfg.update(
            DataFactory.get_generated_code_dict(user_cfg_dict["data"]["name"])
        )
        # Dict for defining cfg in the rendered code
        generated_user_cfg = deepcopy(user_cfg_dict)
        generated_user_cfg["data"].pop("name")
        generated_user_cfg.pop("pipeline_name")
        generated_user_cfg.pop("pipeline_mode")
        # model arch configuration
        generated_user_cfg["model"] = train_cfg["model"]
        render_cfg["user_cfg_str"] = f"cfg = {str(generated_user_cfg)}"
        render_cfg["user_cfg"] = user_cfg_dict
        file_current_dir = Path(__file__).resolve().parent
        with open(file_current_dir / "nodepred-ns.jinja-py", "r") as f:
            template = Template(f.read())
        return template.render(**render_cfg)
    @staticmethod
    def METHOD_NAME() -> str:
        # Short description shown in the CLI pipeline listing.
        return "Node classification neighbor sampling pipeline for inference"
298,647 | mk glsa | # misc things useful for tests.
from snakeoil.mappings import AttrAccessible
from ..ebuild.atom import atom
from ..ebuild.conditionals import DepSet
from ..ebuild.cpv import CPV
from ..ebuild.eapi import get_eapi
from ..ebuild.ebuild_src import package
from ..ebuild.misc import collapsed_restrict_to_data
from ..ebuild.repo_objs import RepoConfig
from ..package.metadata import factory
from ..repository.util import SimpleTree
from ..restrictions import packages
# Arches used as the default KEYWORDS value for fake packages below.
default_arches = {"x86", "ppc", "amd64", "ia64"}
# Attribute-accessible mapping used as a lightweight options container.
Options = AttrAccessible
class FakePkgBase(package):
    """Ebuild package stub that fills in empty metadata defaults."""
    __slots__ = ()
    def __init__(self, cpvstr, data=None, shared=None, repo=None):
        if data is None:
            data = {}
        # Guarantee the metadata keys the ebuild machinery reads are present.
        for x in ("DEPEND", "RDEPEND", "PDEPEND", "IUSE", "LICENSE"):
            data.setdefault(x, "")
        data.setdefault("KEYWORDS", " ".join(default_arches))
        cpv = CPV(cpvstr, versioned=True)
        super().__init__(shared, repo, cpv.category, cpv.package, cpv.fullver)
        # NOTE(review): object.__setattr__ is used to bypass the base class's
        # attribute handling (slotted/immutable packages) — confirm.
        object.__setattr__(self, "data", data)
class FakeProfile:
    """Minimal stand-in for an ebuild profile used in tests.

    Accepts plain dicts/sequences and converts them into the atom and
    restriction structures the real profile API exposes.
    """

    def __init__(
        self,
        masked_use=None,
        forced_use=None,
        provides=None,
        masks=(),
        virtuals=None,
        arch="x86",
        name="none",
    ):
        # Fix: avoid mutable default arguments ({} / []); fall back to
        # fresh empty containers instead. Callers passing dicts/lists are
        # unaffected.
        masked_use = {} if masked_use is None else masked_use
        forced_use = {} if forced_use is None else forced_use
        provides = {} if provides is None else provides
        virtuals = {} if virtuals is None else virtuals
        self.provides_repo = SimpleTree(provides)
        self.masked_use = {atom(k): v for k, v in masked_use.items()}
        self.forced_use = {atom(k): v for k, v in forced_use.items()}
        self.masks = tuple(map(atom, masks))
        self.virtuals = SimpleTree(virtuals)
        self.arch = arch
        self.name = name
        # Precomputed restriction->flag data matching the profile protocol.
        self.forced_data = collapsed_restrict_to_data(
            [(packages.AlwaysTrue, (self.arch,))], self.forced_use.items()
        )
        self.masked_data = collapsed_restrict_to_data(
            [(packages.AlwaysTrue, default_arches)], self.masked_use.items()
        )

    def make_virtuals_repo(self, repo):
        # The fake profile ignores the passed repo and returns canned virtuals.
        return self.virtuals
class FakeRepo:
    """In-memory repository stub implementing the repo match protocol."""
    def __init__(self, pkgs=(), repo_id="", location="", masks=(), **kwds):
        self.pkgs = pkgs
        self.repo_id = repo_id or location
        self.location = location
        self.masks = masks
        # Arbitrary extra attributes let tests fake repo-specific fields.
        for k, v in kwds.items():
            setattr(self, k, v)
    def itermatch(self, restrict, sorter=iter, pkg_cls=lambda x: x, **kwargs):
        # Lazily yield packages (optionally sorted/wrapped) matching restrict.
        return filter(restrict.match, list(map(pkg_cls, sorter(self.pkgs))))
    def match(self, restrict, **kwargs):
        return list(self.itermatch(restrict, **kwargs))
    @property
    def masked(self):
        # Combined mask restriction over all configured masks.
        return packages.OrRestriction(*self.masks)
    def __iter__(self):
        return self.itermatch(packages.AlwaysTrue)
    def __contains__(self, obj):
        """Determine if a path or a package is in a repo."""
        if isinstance(obj, str):
            # Strings are treated as filesystem paths under the repo location.
            if self.location and obj.startswith(self.location):
                return True
            return False
        else:
            # Anything else is a restriction; contained if anything matches.
            for pkg in self.itermatch(obj):
                return True
            return False
class FakeEbuildRepo(FakeRepo):
    """FakeRepo variant carrying an ebuild RepoConfig and a trees tuple."""
    def __init__(self, *args, **kwds):
        self.config = kwds.pop("config", RepoConfig("nonexistent"))
        # An ebuild repo exposes itself as its own (single-entry) tree list.
        self.trees = (self,)
        super().__init__(*args, **kwds)
class FakePkg(FakePkgBase):
    """Fully-configurable fake package (slot, use flags, keywords, repo)."""
    # NOTE(review): repo=FakeRepo() is a mutable default shared across all
    # calls that omit repo — likely harmless for read-only test repos, but
    # confirm no test mutates the shared instance.
    def __init__(
        self,
        cpv,
        eapi="0",
        slot="0",
        subslot=None,
        iuse=None,
        use=(),
        repo=FakeRepo(),
        restrict="",
        keywords=None,
        **kwargs,
    ):
        # Accept a repo id string or an args tuple/list as shorthand.
        if isinstance(repo, str):
            repo = FakeRepo(repo)
        elif isinstance(repo, (tuple, list)) and len(repo) < 3:
            repo = FakeRepo(*repo)
        super().__init__(cpv, repo=factory(repo), **kwargs)
        if keywords is not None:
            object.__setattr__(self, "keywords", set(keywords))
        object.__setattr__(self, "slot", str(slot))
        # Subslot defaults to the slot, mirroring ebuild semantics.
        if subslot is None:
            subslot = slot
        object.__setattr__(self, "subslot", subslot)
        object.__setattr__(self, "restrict", DepSet.parse(restrict, str))
        object.__setattr__(self, "fetchables", [])
        object.__setattr__(self, "use", set(use))
        object.__setattr__(self, "eapi", get_eapi(eapi, False))
        if iuse is not None:
            object.__setattr__(self, "iuse", set(iuse))
# misc setup code for generating glsas for testing
glsa_template = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE glsa SYSTEM "http://www.gentoo.org/dtd/glsa.dtd">
<?xml-stylesheet href="/xsl/glsa.xsl" type="text/xsl"?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl"?>
<glsa id="%s">
<title>generated glsa for %s</title>
<synopsis>
foon
</synopsis>
<product type="ebuild">foon</product>
<announced>2003-11-23</announced>
<revised>2003-11-23: 01</revised>
<bug>33989</bug>
<access>remote</access>
<affected>%s</affected>
<background>
<p>FreeRADIUS is a popular open source RADIUS server.</p>
</background>
<description>
<p>foon</p>
</description>
<impact type="normal">
<p>
impact-rific
</p>
</impact>
<workaround>
<p>redundant if no workaround</p>
</workaround>
<resolution>
<p>blarh</p>
</resolution>
<references>
<uri link="http://www.securitytracker.com/alerts/2003/Nov/1008263.html">SecurityTracker.com Security Alert</uri>
</references>
</glsa>
"""
# Mapping from version-comparison operator prefixes to GLSA range names
# (written out literally rather than derived via successive updates).
ops = {
    ">": "gt",
    "<": "lt",
    ">=": "ge",
    "<=": "le",
    "~>": "rgt",
    "~<": "rlt",
    "~>=": "rge",
    "~<=": "rle",
    "=": "eq",
}
def convert_range(text, tag, slot):
    """Render an operator-prefixed version string as a GLSA range element."""
    # Split the leading operator characters from the version payload.
    split_at = 0
    while text[split_at] in "><=~":
        split_at += 1
    range_name = ops[text[:split_at]]
    version = text[split_at:]
    slot_attr = f' slot="{slot}"' if slot else ""
    return f'<{tag} range="{range_name}"{slot_attr}>{version}</{tag}>'
def METHOD_NAME(*pkgs, **kwds):
    """Build a GLSA XML document for the given package range specs.

    Each spec is ``(pkg, ranges)``, ``(pkg, ranges, arch)`` or
    ``(pkg, slot, ranges, arch)`` where ``ranges`` is a pair of
    (unaffected, vulnerable) version lists.
    """
    id = kwds.pop("id", None)
    if kwds:
        raise TypeError("id is the only allowed kwds; got %r" % kwds)
    id = str(id)
    # Accumulate <package> chunks and join once instead of repeated +=.
    chunks = []
    for data in pkgs:
        if len(data) == 4:
            pkg, slot, ranges, arch = data
        elif len(data) == 3:
            pkg, ranges, arch = data
            slot = ""
        else:
            pkg, ranges = data
            slot = ""
            arch = "*"
        unaffected, vulnerable = ranges
        chunks.append(
            '<package name="%s" auto="yes" arch="%s">%s%s\n</package>'
            % (
                pkg,
                arch,
                "\n".join(convert_range(x, "unaffected", slot) for x in unaffected),
                "\n".join(convert_range(x, "vulnerable", slot) for x in vulnerable),
            )
        )
    return glsa_template % (id, id, "".join(chunks))
298,648 | url | import logging
from typing import Optional, List, Dict, Union, Any
from haystack.nodes.prompt.invocation_layer.handlers import DefaultTokenStreamingHandler, TokenStreamingHandler
from haystack.nodes.prompt.invocation_layer.open_ai import OpenAIInvocationLayer
from haystack.nodes.prompt.invocation_layer.utils import has_azure_parameters
from haystack.utils.openai_utils import openai_request, _check_openai_finish_reason, count_openai_tokens_messages
logger = logging.getLogger(__name__)
class ChatGPTInvocationLayer(OpenAIInvocationLayer):
    """
    ChatGPT Invocation Layer
    PromptModelInvocationLayer implementation for OpenAI's GPT-3 ChatGPT API. Invocations are made using REST API.
    See [OpenAI ChatGPT API](https://platform.openai.com/docs/guides/chat) for more details.
    Note: kwargs other than init parameter names are ignored to enable reflective construction of the class
    as many variants of PromptModelInvocationLayer are possible and they may have different parameters.
    """
    def __init__(
        self,
        api_key: str,
        model_name_or_path: str = "gpt-3.5-turbo",
        max_length: Optional[int] = 500,
        api_base: str = "https://api.openai.com/v1",
        **kwargs,
    ):
        """
        Creates an instance of ChatGPTInvocationLayer for OpenAI's GPT-3.5 GPT-4 models.
        :param model_name_or_path: The name or path of the underlying model.
        :param max_length: The maximum number of tokens the output text can have.
        :param api_key: The OpenAI API key.
        :param api_base: The OpenAI API Base url, defaults to `https://api.openai.com/v1`.
        :param kwargs: Additional keyword arguments passed to the underlying model.
        [See OpenAI documentation](https://platform.openai.com/docs/api-reference/chat).
        Note: additional model argument moderate_content will filter input and generated answers for potentially
        sensitive content using the [OpenAI Moderation API](https://platform.openai.com/docs/guides/moderation)
        if set. If the input or answers are flagged, an empty list is returned in place of the answers.
        """
        super().__init__(api_key, model_name_or_path, max_length, api_base=api_base, **kwargs)
    def _execute_openai_request(
        self, prompt: Union[str, List[Dict]], base_payload: Dict, kwargs_with_defaults: Dict, stream: bool
    ):
        """
        For more details, see [OpenAI ChatGPT API reference](https://platform.openai.com/docs/api-reference/chat).
        """
        # Normalize the prompt to the ChatML messages format: a bare string
        # becomes a single user message; a list of dicts is passed through.
        if isinstance(prompt, str):
            messages = [{"role": "user", "content": prompt}]
        elif isinstance(prompt, list) and len(prompt) > 0 and isinstance(prompt[0], dict):
            messages = prompt
        else:
            raise ValueError(
                f"The prompt format is different than what the model expects. "
                f"The model {self.model_name_or_path} requires either a string or messages in the ChatML format. "
                f"For more details, see this [GitHub discussion](https://github.com/openai/openai-python/blob/main/chatml.md)."
            )
        extra_payload = {"messages": messages}
        payload = {**base_payload, **extra_payload}
        if not stream:
            # Blocking request: read the full response, check finish_reason,
            # and collect one answer per returned choice.
            response = openai_request(METHOD_NAME=self.METHOD_NAME, headers=self.headers, payload=payload)
            _check_openai_finish_reason(result=response, payload=payload)
            assistant_response = [choice["message"]["content"].strip() for choice in response["choices"]]
        else:
            # Streaming request: feed tokens through the (possibly user-supplied)
            # stream handler as they arrive.
            response = openai_request(
                METHOD_NAME=self.METHOD_NAME, headers=self.headers, payload=payload, read_response=False, stream=True
            )
            handler: TokenStreamingHandler = kwargs_with_defaults.pop("stream_handler", DefaultTokenStreamingHandler())
            assistant_response = self._process_streaming_response(response=response, stream_handler=handler)
        # Although ChatGPT generates text until stop words are encountered, unfortunately it includes the stop word
        # We want to exclude it to be consistent with other invocation layers
        if "stop" in kwargs_with_defaults and kwargs_with_defaults["stop"] is not None:
            stop_words = kwargs_with_defaults["stop"]
            for idx, _ in enumerate(assistant_response):
                for stop_word in stop_words:
                    assistant_response[idx] = assistant_response[idx].replace(stop_word, "").strip()
        return assistant_response
    def _extract_token(self, event_data: Dict[str, Any]):
        # Pull the incremental token out of a streaming event, if present.
        delta = event_data["choices"][0]["delta"]
        if "content" in delta:
            return delta["content"]
        return None
    def _ensure_token_limit(self, prompt: Union[str, List[Dict[str, str]]]) -> Union[str, List[Dict[str, str]]]:
        """Make sure the length of the prompt and answer is within the max tokens limit of the model.
        If needed, truncate the prompt text so that it fits within the limit.
        :param prompt: Prompt text to be sent to the generative model.
        """
        if isinstance(prompt, str):
            messages = [{"role": "user", "content": prompt}]
        elif isinstance(prompt, list) and len(prompt) > 0 and isinstance(prompt[0], dict):
            messages = prompt
        n_message_tokens = count_openai_tokens_messages(messages, self._tokenizer)
        n_answer_tokens = self.max_length
        if (n_message_tokens + n_answer_tokens) <= self.max_tokens_limit:
            return prompt
        if isinstance(prompt, str):
            # Truncate only the user text; n_other_tokens accounts for the
            # ChatML message framing overhead.
            tokenized_prompt = self._tokenizer.encode(prompt)
            n_other_tokens = n_message_tokens - len(tokenized_prompt)
            truncated_prompt_length = self.max_tokens_limit - n_answer_tokens - n_other_tokens
            logger.warning(
                "The prompt has been truncated from %s tokens to %s tokens so that the prompt length and "
                "answer length (%s tokens) fit within the max token limit (%s tokens). "
                "Reduce the length of the prompt to prevent it from being cut off.",
                len(tokenized_prompt),
                truncated_prompt_length,
                n_answer_tokens,
                self.max_tokens_limit,
            )
            truncated_prompt = self._tokenizer.decode(tokenized_prompt[:truncated_prompt_length])
            return truncated_prompt
        else:
            # TODO: support truncation when there is a chat history
            raise ValueError(
                f"The prompt or the messages are too long ({n_message_tokens} tokens). "
                f"The length of the prompt or messages and the answer ({n_answer_tokens} tokens) should be within the max "
                f"token limit ({self.max_tokens_limit} tokens). "
                f"Reduce the length of the prompt or messages."
            )
    @property
    def METHOD_NAME(self) -> str:
        # Chat-completions endpoint for this layer.
        return f"{self.api_base}/chat/completions"
    @classmethod
    def supports(cls, model_name_or_path: str, **kwargs) -> bool:
        # This layer handles gpt-3.5-turbo / gpt-4 models, except when Azure
        # parameters are present (those go to the Azure variant).
        valid_model = any(m for m in ["gpt-3.5-turbo", "gpt-4"] if m in model_name_or_path)
        return valid_model and not has_azure_parameters(**kwargs)
298,649 | is coroutine | from __future__ import absolute_import
import re
import sys
import copy
import logging
from . import tools
from datetime import date, datetime
import enum
import six
logger = logging.getLogger(__name__)
MDS_URI_PREFIX = 'https://storage.yandex-team.ru/get-devtools/'
def apply(func, value, apply_to_keys=False):
    """Recursively apply ``func`` to every leaf of ``value``.

    ``func`` receives ``(leaf, path)`` where ``path`` is the list of
    indices/keys leading to the leaf. Lists and tuples are rebuilt as
    lists; dicts are rebuilt with items visited in sorted key order
    (keys are also transformed when ``apply_to_keys`` is true); any dict
    recognised by ``is_external`` is treated as a leaf.
    """
    def walk(node, path):
        if isinstance(node, (list, tuple)):
            return [walk(item, path + [idx]) for idx, item in enumerate(node)]
        if isinstance(node, dict) and not is_external(node):
            transformed = {}
            for key, val in sorted(node.items(), key=lambda kv: kv[0]):
                child_path = path + [key]
                new_key = walk(key, child_path) if apply_to_keys else key
                transformed[new_key] = walk(val, child_path)
            return transformed
        # Primitive or external pointer: hand it to func with its path.
        return func(node, path)

    return walk(value, [])
def METHOD_NAME(val):
    """Return True if ``val`` is a coroutine function or coroutine object."""
    # Python 2 has no native coroutines at all.
    if sys.version_info[0] < 3:
        return False
    # Imported lazily so the module stays importable on Python 2.
    import asyncio

    return asyncio.iscoroutinefunction(val) or asyncio.iscoroutine(val)
def serialize(value):
    """
    Serialize value to json-convertible object
    Ensures that all components of value can be serialized to json
    :param value: object to be serialized
    """
    def _serialize(val, _):
        # Leaf serializer; the path argument from apply() is unused.
        if val is None:
            return val
        if isinstance(val, six.string_types) or isinstance(val, bytes):
            # Normalize all text/bytes to utf-8 form.
            return tools.to_utf8(val)
        if isinstance(val, enum.Enum):
            return str(val)
        if isinstance(val, six.integer_types) or type(val) in [float, bool]:
            return val
        if is_external(val):
            # External pointers are already plain dicts.
            return dict(val)
        if isinstance(val, (date, datetime)):
            return repr(val)
        if METHOD_NAME(val):
            # Coroutines cannot be represented in json; drop them.
            return None
        raise ValueError("Cannot serialize value '{}' of type {}".format(val, type(val)))
    # Keys are serialized too (apply_to_keys=True).
    return apply(_serialize, value, apply_to_keys=True)
def is_external(value):
    # External pointers are serialized as dicts carrying a "uri" field.
    return isinstance(value, dict) and "uri" in value
class ExternalSchema(object):
    """URI scheme names used when serializing external data pointers."""

    File = "file"
    SandboxResource = "sbr"
    Delayed = "delayed"
    HTTP = "http"
class CanonicalObject(dict):
    """Dict marker type for canonical data; iteration is deliberately blocked."""

    def __iter__(self):
        # Canonical objects must be treated as opaque mappings.
        raise TypeError("Iterating canonical object is not implemented")
class ExternalDataInfo(object):
    """Typed wrapper over a serialized external-data dict (must carry "uri")."""
    def __init__(self, data):
        assert is_external(data)
        self._data = data
    def __str__(self):
        type_str = "File" if self.is_file else "Sandbox resource"
        return "{}({})".format(type_str, self.path)
    def __repr__(self):
        return str(self)
    @property
    def uri(self):
        return self._data["uri"]
    @property
    def checksum(self):
        return self._data.get("checksum")
    # Schema predicates: which kind of external pointer this is.
    @property
    def is_file(self):
        return self.uri.startswith(ExternalSchema.File)
    @property
    def is_sandbox_resource(self):
        return self.uri.startswith(ExternalSchema.SandboxResource)
    @property
    def is_delayed(self):
        return self.uri.startswith(ExternalSchema.Delayed)
    @property
    def is_http(self):
        return self.uri.startswith(ExternalSchema.HTTP)
    @property
    def path(self):
        # Everything after the single "schema://" separator; malformed URIs
        # are logged and returned unchanged.
        if self.uri.count("://") != 1:
            logger.error("Invalid external data uri: '%s'", self.uri)
            return self.uri
        _, path = self.uri.split("://")
        return path
    def get_mds_key(self):
        # Extract the MDS storage key from an http uri with the known prefix.
        assert self.is_http
        m = re.match(re.escape(MDS_URI_PREFIX) + r'(.*?)($|#)', self.uri)
        if m:
            return m.group(1)
        raise AssertionError("Failed to extract mds key properly from '{}'".format(self.uri))
    @property
    def size(self):
        return self._data.get("size")
    def serialize(self):
        return self._data
    @classmethod
    def _serialize(cls, schema, path, checksum=None, attrs=None):
        # Shared builder for all serialize_* helpers below.
        res = CanonicalObject({"uri": "{}://{}".format(schema, path)})
        if checksum:
            res["checksum"] = checksum
        if attrs:
            res.update(attrs)
        return res
    @classmethod
    def serialize_file(cls, path, checksum=None, diff_tool=None, local=False, diff_file_name=None, diff_tool_timeout=None, size=None):
        # Only truthy/meaningful options are recorded as attributes.
        attrs = {}
        if diff_tool:
            attrs["diff_tool"] = diff_tool
        if local:
            attrs["local"] = local
        if diff_file_name:
            attrs["diff_file_name"] = diff_file_name
        if diff_tool_timeout:
            attrs["diff_tool_timeout"] = diff_tool_timeout
        if size is not None:
            attrs["size"] = size
        return cls._serialize(ExternalSchema.File, path, checksum, attrs=attrs)
    @classmethod
    def serialize_resource(cls, id, checksum=None):
        return cls._serialize(ExternalSchema.SandboxResource, id, checksum)
    @classmethod
    def serialize_delayed(cls, upload_id, checksum):
        return cls._serialize(ExternalSchema.Delayed, upload_id, checksum)
    def get(self, key, default=None):
        return self._data.get(key, default)
298,650 | send request | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import ComponentsOperations, Operations, ProactiveDetectionConfigurationsOperations, WebTestsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ApplicationInsightsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Composite Swagger for Application Insights Management Client.
:ivar proactive_detection_configurations: ProactiveDetectionConfigurationsOperations operations
:vartype proactive_detection_configurations:
azure.mgmt.applicationinsights.v2018_05_01_preview.aio.operations.ProactiveDetectionConfigurationsOperations
:ivar components: ComponentsOperations operations
:vartype components:
azure.mgmt.applicationinsights.v2018_05_01_preview.aio.operations.ComponentsOperations
:ivar operations: Operations operations
:vartype operations:
azure.mgmt.applicationinsights.v2018_05_01_preview.aio.operations.Operations
:ivar web_tests: WebTestsOperations operations
:vartype web_tests:
azure.mgmt.applicationinsights.v2018_05_01_preview.aio.operations.WebTestsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2018-05-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ApplicationInsightsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.proactive_detection_configurations = ProactiveDetectionConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.components = ComponentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.web_tests = WebTestsOperations(self._client, self._config, self._serialize, self._deserialize)
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
    """Run a raw network request through the client's chained policies.

    >>> from azure.core.rest import HttpRequest
    >>> request = HttpRequest("GET", "https://www.example.org/")
    <HttpRequest [GET], url: 'https://www.example.org/'>
    >>> response = await client._send_request(request)
    <AsyncHttpResponse: 200 OK>

    For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

    :param request: The network request you want to make. Required.
    :type request: ~azure.core.rest.HttpRequest
    :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
    :return: The response of your network call. Does not do error handling on your response.
    :rtype: ~azure.core.rest.AsyncHttpResponse
    """
    # Work on a copy so the caller's request object is never mutated.
    outgoing = deepcopy(request)
    formatted_url = self._client.format_url(outgoing.url)
    outgoing.url = formatted_url
    return self._client.send_request(outgoing, **kwargs)
async def close(self) -> None:
    """Close the underlying async pipeline client and release its transport resources."""
    await self._client.close()
async def __aenter__(self) -> "ApplicationInsightsManagementClient":
    """Enter the async context: open the pipeline client, then return this client."""
    await self._client.__aenter__()
    return self
async def __aexit__(self, *exc_details: Any) -> None:
    """Exit the async context, delegating cleanup to the pipeline client."""
    await self._client.__aexit__(*exc_details)
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from cloudharness_model.models.base_model_ import Model
from cloudharness_model import util
class UserGroup(Model):
    """Keycloak-style user group model (id, name, path, roles, nested sub-groups).

    NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, access=None, attributes=None, client_roles=None, id=None, name=None, path=None, realm_roles=None, sub_groups=None):  # noqa: E501
        """UserGroup - a model defined in OpenAPI

        :param access: The access of this UserGroup.  # noqa: E501
        :type access: Dict[str, object]
        :param attributes: The attributes of this UserGroup.  # noqa: E501
        :type attributes: Dict[str, object]
        :param client_roles: The client_roles of this UserGroup.  # noqa: E501
        :type client_roles: Dict[str, object]
        :param id: The id of this UserGroup.  # noqa: E501
        :type id: str
        :param name: The name of this UserGroup.  # noqa: E501
        :type name: str
        :param path: The path of this UserGroup.  # noqa: E501
        :type path: str
        :param realm_roles: The realm_roles of this UserGroup.  # noqa: E501
        :type realm_roles: List[str]
        :param sub_groups: The sub_groups of this UserGroup.  # noqa: E501
        :type sub_groups: List[UserGroup]
        """
        # Declared OpenAPI type of each attribute, consumed by
        # util.deserialize_model when building instances from dicts.
        self.openapi_types = {
            'access': Dict[str, object],
            'attributes': Dict[str, object],
            'client_roles': Dict[str, object],
            'id': str,
            'name': str,
            'path': str,
            'realm_roles': List[str],
            'sub_groups': List[UserGroup]
        }

        # Python attribute name -> JSON (camelCase) key mapping.
        self.attribute_map = {
            'access': 'access',
            'attributes': 'attributes',
            'client_roles': 'clientRoles',
            'id': 'id',
            'name': 'name',
            'path': 'path',
            'realm_roles': 'realmRoles',
            'sub_groups': 'subGroups'
        }

        self._access = access
        self._attributes = attributes
        self._client_roles = client_roles
        self._id = id
        self._name = name
        self._path = path
        self._realm_roles = realm_roles
        self._sub_groups = sub_groups

    @classmethod
    def METHOD_NAME(cls, dikt) -> 'UserGroup':
        """Returns the dict as a model

        :param dikt: A dict.
        :type dikt: dict
        :return: The UserGroup of this UserGroup.  # noqa: E501
        :rtype: UserGroup
        """
        return util.deserialize_model(dikt, cls)

    @property
    def access(self):
        """Gets the access of this UserGroup.

        :return: The access of this UserGroup.
        :rtype: Dict[str, object]
        """
        return self._access

    @access.setter
    def access(self, access):
        """Sets the access of this UserGroup.

        :param access: The access of this UserGroup.
        :type access: Dict[str, object]
        """
        self._access = access

    @property
    def attributes(self):
        """Gets the attributes of this UserGroup.

        :return: The attributes of this UserGroup.
        :rtype: Dict[str, object]
        """
        return self._attributes

    @attributes.setter
    def attributes(self, attributes):
        """Sets the attributes of this UserGroup.

        :param attributes: The attributes of this UserGroup.
        :type attributes: Dict[str, object]
        """
        self._attributes = attributes

    @property
    def client_roles(self):
        """Gets the client_roles of this UserGroup.

        :return: The client_roles of this UserGroup.
        :rtype: Dict[str, object]
        """
        return self._client_roles

    @client_roles.setter
    def client_roles(self, client_roles):
        """Sets the client_roles of this UserGroup.

        :param client_roles: The client_roles of this UserGroup.
        :type client_roles: Dict[str, object]
        """
        self._client_roles = client_roles

    @property
    def id(self):
        """Gets the id of this UserGroup.

        :return: The id of this UserGroup.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this UserGroup.

        :param id: The id of this UserGroup.
        :type id: str
        """
        self._id = id

    @property
    def name(self):
        """Gets the name of this UserGroup.

        :return: The name of this UserGroup.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this UserGroup.

        :param name: The name of this UserGroup.
        :type name: str
        """
        self._name = name

    @property
    def path(self):
        """Gets the path of this UserGroup.

        :return: The path of this UserGroup.
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this UserGroup.

        :param path: The path of this UserGroup.
        :type path: str
        """
        self._path = path

    @property
    def realm_roles(self):
        """Gets the realm_roles of this UserGroup.

        :return: The realm_roles of this UserGroup.
        :rtype: List[str]
        """
        return self._realm_roles

    @realm_roles.setter
    def realm_roles(self, realm_roles):
        """Sets the realm_roles of this UserGroup.

        :param realm_roles: The realm_roles of this UserGroup.
        :type realm_roles: List[str]
        """
        self._realm_roles = realm_roles

    @property
    def sub_groups(self):
        """Gets the sub_groups of this UserGroup.

        :return: The sub_groups of this UserGroup.
        :rtype: List[UserGroup]
        """
        return self._sub_groups

    @sub_groups.setter
    def sub_groups(self, sub_groups):
        """Sets the sub_groups of this UserGroup.

        :param sub_groups: The sub_groups of this UserGroup.
        :type sub_groups: List[UserGroup]
        """
        self._sub_groups = sub_groups
# NI_DAQ.py, National Instruments Data AcQuisition instrument driver
# Reinier Heeres <reinier@heeres.eu>, 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import qkit
from qkit.core.instrument_base import Instrument
import types
from qkit.instruments import dll_support_nidaq as nidaq
def METHOD_NAME(devchan):
    """Strip the device prefix from a 'device/channel' string.

    Only an input of the exact form 'dev/chan' (one slash) yields the channel
    part; anything else (no slash, or multiple slashes) is returned unchanged.
    """
    parts = devchan.split('/')
    if len(parts) == 2:
        return parts[1]
    return devchan
class NI_DAQ(Instrument):
    """qkit instrument wrapper for a National Instruments DAQ device.

    Discovers the device's physical analog input, analog output and counter
    channels at construction time and registers each one as an instrument
    parameter (inputs/counters readable, outputs settable).
    """

    def __init__(self, name, id):
        # id: NI device identifier as reported by the driver (e.g. 'Dev1').
        Instrument.__init__(self, name, tags=['physical'])

        self._id = id

        # One readable voltage parameter per physical analog input channel.
        for ch_in in self._get_input_channels():
            ch_in = METHOD_NAME(ch_in)
            self.add_parameter(ch_in,
                flags=Instrument.FLAG_GET,
                type=float,
                units='V',
                tags=['measure'],
                get_func=self.do_get_input,
                channel=ch_in)

        # One settable voltage parameter per physical analog output channel.
        for ch_out in self._get_output_channels():
            ch_out = METHOD_NAME(ch_out)
            self.add_parameter(ch_out,
                flags=Instrument.FLAG_SET,
                type=float,
                units='V',
                tags=['sweep'],
                set_func=self.do_set_output,
                channel=ch_out)

        # Counters get both a readable count parameter and a '<ctr>_src'
        # parameter selecting the counting source terminal.
        for ch_ctr in self._get_counter_channels():
            ch_ctr = METHOD_NAME(ch_ctr)
            self.add_parameter(ch_ctr,
                flags=Instrument.FLAG_GET,
                type=int,
                units='#',
                tags=['measure'],
                get_func=self.do_get_counter,
                channel=ch_ctr)
            self.add_parameter(ch_ctr + "_src",
                flags=Instrument.FLAG_SET | Instrument.FLAG_SOFTGET,
                type=str,
                set_func=self.do_set_counter_src,
                channel=ch_ctr)

        # Analog-input terminal configuration (referenced single-ended, etc.).
        self.add_parameter('chan_config',
            flags=Instrument.FLAG_SET | Instrument.FLAG_SOFTGET,
            type=str,
            option_list=('Default', 'RSE', 'NRSE', 'Diff', 'PseudoDiff'))

        # Gate time, in seconds, used when reading counters.
        self.add_parameter('count_time',
            flags=Instrument.FLAG_SET | Instrument.FLAG_SOFTGET,
            type=float,
            units='s')

        self.add_function('reset')
        self.add_function('digital_out')

        # Defaults: RSE inputs, 100 ms counter gate; then read everything once.
        self.reset()
        self.set_chan_config('RSE')
        self.set_count_time(0.1)
        self.get_all()

    def get_all(self):
        """Read all input channels once so their cached values are populated."""
        ch_in = [METHOD_NAME(ch) for ch in self._get_input_channels()]
        self.get(ch_in)

    def reset(self):
        '''Reset device.'''
        nidaq.reset_device(self._id)

    def _get_input_channels(self):
        # Full 'device/channel' names for all physical analog inputs.
        return nidaq.get_physical_input_channels(self._id)

    def _get_output_channels(self):
        # Full 'device/channel' names for all physical analog outputs.
        return nidaq.get_physical_output_channels(self._id)

    def _get_counter_channels(self):
        # Full 'device/channel' names for all physical counters.
        return nidaq.get_physical_counter_channels(self._id)

    def do_get_input(self, channel):
        """Read one voltage sample from an analog input channel."""
        devchan = '%s/%s' % (self._id, channel)
        return nidaq.read(devchan, config=self._chan_config)

    def do_set_output(self, val, channel):
        """Write a voltage to an analog output channel."""
        devchan = '%s/%s' % (self._id, channel)
        return nidaq.write(devchan, val)

    def do_set_chan_config(self, val):
        # Cached; applied on every subsequent input read.
        self._chan_config = val

    def do_set_count_time(self, val):
        # Cached; used as the gate time for counter reads.
        self._count_time = val

    def do_get_counter(self, channel):
        """Read one gated count from a counter channel, using its configured source."""
        devchan = '%s/%s' % (self._id, channel)
        src = self.get(channel + "_src")
        if src is not None and src != '':
            src = '/%s/%s' % (self._id, src)
        # freq is the gating frequency corresponding to the configured count time.
        return nidaq.read_counter(devchan, src=src, freq=1 / self._count_time)

    def read_counters(self, channels):
        """Read several counter channels in one driver call."""
        chans = []
        srcs = []
        for chan in channels:
            chans.append('%s/%s' % (self._id, chan))
            srcs.append(self.get(chan + "_src"))
        return nidaq.read_counters(chans, src=srcs, freq=1.0 / self._count_time)

    # Dummy
    def do_set_counter_src(self, val, channel):
        # Value is only cached via FLAG_SOFTGET; nothing to push to hardware here.
        return True

    def digital_out(self, lines, val):
        """Write an 8-bit value to the given digital output lines."""
        devchan = '%s/%s' % (self._id, lines)
        return nidaq.write_dig_port8(devchan, val)
def detect_instruments():
    '''Refresh NI DAQ instrument list.'''
    # Register one NI_DAQ instrument per physical device reported by the driver,
    # named 'NI<device>' (e.g. 'NIDev1').
    for name in nidaq.get_device_names():
        qkit.instruments.create('NI%s' % name, 'NI_DAQ', id=name)
#
# Copyright (c) 2014-2015 Harvard, edX & OpenCraft
#
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute and/or modify this program under the terms of
# the GNU Affero General Public License (AGPL) as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
# Imports ###########################################################
from django.utils.html import strip_tags
from lxml import etree
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.fields import List, Scope, String
from xblock.validation import ValidationMessage
from xblockutils.resources import ResourceLoader
from xblockutils.studio_editable import StudioEditableXBlockMixin
from problem_builder.mixins import (ExpandStaticURLMixin,
XBlockWithTranslationServiceMixin)
loader = ResourceLoader(__name__)
# Make '_' a no-op so we can scrape strings
def _(text):
    """Identity gettext stub: return *text* unchanged so literals can be scraped for translation."""
    return text
# Classes ###########################################################
@XBlock.needs("i18n")
class TipBlock(StudioEditableXBlockMixin, XBlockWithTranslationServiceMixin, XBlock, ExpandStaticURLMixin):
    """
    Each choice can define a tip depending on selection
    """
    content = String(
        display_name=_("Content"),
        help=_("Text of the tip to show if the student chooses this tip's associated choice[s]"),
        scope=Scope.content,
        default=""
    )
    values = List(
        display_name=_("For Choices"),
        help=_("List of choices for which to display this tip"),
        scope=Scope.content,
        default=[],
        # Valid values come from the parent (question) block's choices.
        list_values_provider=lambda self: self.get_parent().human_readable_choices,
        list_style='set',  # Unordered, unique items. Affects the UI editor.
    )
    width = String(
        display_name=_("Width"),
        help=_("Width of the tip popup (e.g. '400px')"),
        scope=Scope.content,
        default=''
    )
    height = String(
        display_name=_("Height"),
        help=_("Height of the tip popup (e.g. '200px')"),
        scope=Scope.content,
        default=''
    )
    editable_fields = ('values', 'content', 'width', 'height')

    @property
    def display_name_with_default(self):
        """Build a display name listing the (truncated) choices this tip applies to."""
        values_list = []
        for entry in self.get_parent().human_readable_choices:
            if entry["value"] in self.values:
                display_name = strip_tags(entry["display_name"])  # Studio studio_view can't handle html in display_name
                if len(display_name) > 20:
                    display_name = display_name[:20] + '…'
                values_list.append(display_name)
        return self._("Tip for {list_of_choices}").format(list_of_choices=", ".join(values_list))

    def mentoring_view(self, context=None):
        """ Render this XBlock within a mentoring block. """
        html = loader.render_django_template("templates/html/tip.html", {
            'content': self.content,
            'width': self.width,
            'height': self.height,
        })
        return Fragment(html)

    def student_view_data(self, context=None):
        """Return a JSON-serializable summary of this tip for native/mobile clients."""
        return {
            'display_name': self.display_name_with_default,
            'content': self.expand_static_url(self.content),
            'for_choices': self.values,
        }

    def student_view(self, context=None):
        """ Normal view of this XBlock, identical to mentoring_view """
        return self.mentoring_view(context)

    def METHOD_NAME(self, data):
        """
        Clean up the edits during studio_view save
        """
        # Deduplicate the selected choice values and normalize them to strings.
        if "values" in data:
            data["values"] = list(str(v) for v in set(data["values"]))

    def validate_field_data(self, validation, data):
        """
        Validate this block's field data.
        """
        super().validate_field_data(validation, data)

        def add_error(msg):
            validation.add(ValidationMessage(ValidationMessage.ERROR, msg))

        try:
            valid_values = set(self.get_parent().all_choice_values)
        except Exception:
            # No usable parent (e.g. validated outside a question block):
            # skip the choice-existence check rather than fail validation.
            pass
        else:
            for dummy in set(data.values) - valid_values:
                add_error(self._("A choice selected for this tip does not exist."))

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Construct this XBlock from the given XML node.
        """
        block = runtime.construct_xblock_from_class(cls, keys)
        block.values = cls.values.from_string(node.get('values', '[]'))
        block.width = node.get('width', '')
        block.height = node.get('height', '')
        # The tip content is the node's text plus any nested HTML markup, kept verbatim.
        block.content = str(node.text or "")
        for child in node:
            block.content += etree.tostring(child, encoding='unicode')
        return block
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.conf import settings
from datetime import datetime, timedelta, time
from urllib.parse import urlparse, parse_qsl, urlencode
import json
import jwt
from postgresqleu.util.crypto import rsa_get_jwk_struct
from postgresqleu.util.random import generate_random_token
from postgresqleu.util.decorators import global_login_exempt
from .util import get_conference_or_404, activate_conference_timezone, reglog
from .models import ConferenceRegistration, ConferenceRegistrationTemporaryToken
@global_login_exempt
def jwk_json(request, confname):
    """Serve the conference's public signing key as a JWK set.

    404s when the conference has no public key configured. The response is
    CORS-open: JWKs are public by definition.
    """
    conference = get_conference_or_404(confname)
    if not conference.key_public:
        raise Http404()

    r = HttpResponse(json.dumps(
        {
            'keys': [
                # Key id convention '<urlname>01' matches the 'kid' header set
                # when JWTs are signed for this conference.
                rsa_get_jwk_struct(conference.key_public, '{}01'.format(conference.urlname)),
            ]
        }
    ), content_type='application/json')
    # Everybody is allowed to get the JWKs
    r['Access-Control-Allow-Origin'] = '*'
    return r
@login_required
def conference_temp_token(request, confname):
    """Issue a short-lived temporary token for the logged-in attendee.

    Validates the `redir` target against the conference's configured web
    origins, replaces any previous temporary token for this registration
    with a fresh one (valid 5 minutes), and redirects back to `redir` with
    the token appended to the querystring. The token can later be exchanged
    for a full JWT via conference_jwt().
    """
    redir = request.GET.get('redir', None)
    if not redir:
        return HttpResponse("Mandatory parameter missing", status=404)
    redir = urlparse(redir)

    # Create, or replace, a temporary token for this login, which can later be exchanged for a full JWT.
    conference = get_conference_or_404(confname)
    if not conference.key_public:
        return HttpResponse("Conference key not found", status=404)

    # Explicitly compare scheme/location/path, but *not* the querystring.
    if redir._replace(query=None, fragment=None).geturl() not in conference.web_origins.split(','):
        return HttpResponse("Forbidden redirect URL", status=403)

    try:
        reg = ConferenceRegistration.objects.get(conference=conference, attendee=request.user)
    except ConferenceRegistration.DoesNotExist:
        return HttpResponse("You are not registered for this conference", status=403, content_type='text/plain')
    if not reg.payconfirmedat:
        # BUG FIX: this was `conten_type=`, a misspelled keyword argument that
        # would raise TypeError whenever an unconfirmed registration hit this path.
        return HttpResponse("Not confirmed for this conference", status=403, content_type='text/plain')

    with transaction.atomic():
        # If there is an existing token for this user, just remove it.
        ConferenceRegistrationTemporaryToken.objects.filter(reg=reg).delete()

        # Create a new one
        t = ConferenceRegistrationTemporaryToken(
            reg=reg,
            token=generate_random_token(),
            expires=timezone.now() + timedelta(minutes=5),
        )
        t.save()
        reglog(reg, 'Issued temporary token', request.user)

    # If there are any parameters included in the redirect, we just append ours to it
    param = dict(parse_qsl(redir.query))
    param['token'] = t.token

    return HttpResponseRedirect(redir._replace(query=urlencode(param)).geturl())
class CorsResponse(HttpResponse):
    """HttpResponse that sets Access-Control-Allow-Origin from an allow-list.

    Extra keyword arguments:
      origin  -- the request's Origin header value ('' if absent)
      allowed -- comma-separated list of authorized origins ('' or None for none)

    If the supplied origin is not authorized the response is turned into a
    403 carrying an explanatory message and no CORS header. With no origin
    supplied, our own SITEBASE is used as the allowed origin.
    """
    def __init__(self, *args, **kwargs):
        origin = kwargs.pop('origin')
        allowed = kwargs.pop('allowed')
        super().__init__(*args, **kwargs)
        if origin:
            if allowed:
                # Origin is specified, so validate it against it
                for o in allowed.split(','):
                    if o == origin:
                        matched_origin = o
                        break
                else:
                    return self.METHOD_NAME("Origin not authorized")
            else:
                # If no origin is configured, we're going to use our own sitebase only
                if origin != settings.SITEBASE:
                    return self.METHOD_NAME("No authorized origins configured")
                matched_origin = settings.SITEBASE
        else:
            matched_origin = settings.SITEBASE
        self['Access-Control-Allow-Origin'] = matched_origin

    def METHOD_NAME(self, msg):
        """Turn this response into a 403 with *msg* as the body."""
        self.content = msg
        # BUG FIX: Django's HttpResponse stores the numeric status in
        # `status_code`; assigning to `self.status` created an unused
        # attribute and the rejection still went out as 200 OK.
        self.status_code = 403
@transaction.atomic
@csrf_exempt
@global_login_exempt
@require_http_methods(["POST"])
def conference_jwt(request, confname):
    """Exchange a temporary registration token for a signed conference JWT.

    The temporary token is single-use: it is deleted whether it has expired
    (403) or is successfully exchanged. The JWT is signed with the
    conference's private RSA key and carries attendee details; the response
    sets CORS headers validated against the conference's web origins.
    """
    temptoken = get_object_or_404(ConferenceRegistrationTemporaryToken, token=request.POST.get('token', None))
    reg = temptoken.reg

    activate_conference_timezone(reg.conference)

    if temptoken.expires < timezone.now():
        # Remove the old token as well
        temptoken.delete()
        return CorsResponse("Token expired", status=403, origin=request.headers.get('Origin', ''), allowed=reg.conference.web_origins)

    # Token was valid -- so the first thing we do is remove it
    temptoken.delete()
    reglog(reg, 'Converted temporary to permanent token')

    # We allow caching of the token until a full day after the conference. This may not be the
    # smartest ever, but it'll do for now and reduce the reliance on this endpoint being
    # available during an event.
    expire = datetime.combine(reg.conference.enddate, time(23, 59)) + timedelta(days=1)

    # Else we're good to go to generate the JWT
    # NOTE(review): iat/exp use naive datetimes and datetime.utcnow() is
    # deprecated in newer Python — confirm before a Python upgrade.
    r = CorsResponse(jwt.encode(
        {
            'iat': datetime.utcnow(),
            'exp': expire,
            'iss': settings.SITEBASE,
            'attendee': {
                'name': reg.fullname,
                'email': reg.email,
                'company': reg.company,
                'nick': reg.nick,
                'twittername': reg.twittername,
                'shareemail': reg.shareemail,
                'regid': reg.id,
                'country': reg.countryname,
                'volunteer': reg.is_volunteer,
                'admin': reg.is_admin,
            }
        },
        reg.conference.key_private,
        algorithm='RS256',
        headers={
            # Must match the key id published by jwk_json().
            'kid': '{}01'.format(reg.conference.urlname),
        },
    ), content_type='application/jwt', origin=request.headers.get('Origin', ''), allowed=reg.conference.web_origins)
    return r
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
A test for RPC users with restricted permissions
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
str_to_b64str,
)
import http.client
import urllib.parse
def METHOD_NAME(node, user, method):
    """POST a single JSON-RPC call to *node*, HTTP-basic-authenticated as *user*.

    *user* is a row of the test's user table: index 0 is the username,
    index 3 the plaintext password. Returns the raw http.client response
    (callers inspect .status; the body is not read).
    """
    url = urllib.parse.urlparse(node.url)
    headers = {"Authorization": "Basic " + str_to_b64str('{}:{}'.format(user[0], user[3]))}
    conn = http.client.HTTPConnection(url.hostname, url.port)
    conn.connect()
    conn.request('POST', '/', '{"method": "' + method + '"}', headers)
    resp = conn.getresponse()
    conn.close()
    return resp
class RPCWhitelistTest(BitcoinTestFramework):
    """Functional test for per-user RPC whitelists (rpcwhitelist/rpcauth)."""

    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        # 0 => Username
        # 1 => Password (Hashed)
        # 2 => Permissions
        # 3 => Password Plaintext
        self.users = [
            ["user1", "50358aa884c841648e0700b073c32b2e$b73e95fff0748cc0b517859d2ca47d9bac1aa78231f3e48fa9222b612bd2083e", "getbestblockhash,getblockcount,", "12345"],
            ["user2", "8650ba41296f62092377a38547f361de$4620db7ba063ef4e2f7249853e9f3c5c3592a9619a759e3e6f1c63f2e22f1d21", "getblockcount", "54321"]
        ]
        # For exceptions
        self.strange_users = [
            # Test empty
            ["strangedude", "62d67dffec03836edd698314f1b2be62$c2fb4be29bb0e3646298661123cf2d8629640979cabc268ef05ea613ab54068d", ":", "s7R4nG3R7H1nGZ"],
            ["strangedude2", "575c012c7fe4b1e83b9d809412da3ef7$09f448d0acfc19924dd62ecb96004d3c2d4b91f471030dfe43c6ea64a8f658c1", "", "s7R4nG3R7H1nGZ"],
            # Test trailing comma
            ["strangedude3", "23189c561b5975a56f4cf94030495d61$3a2f6aac26351e2257428550a553c4c1979594e36675bbd3db692442387728c0", ":getblockcount,", "s7R4nG3R7H1nGZ"],
            # Test overwrite
            ["strangedude4", "990c895760a70df83949e8278665e19a$8f0906f20431ff24cb9e7f5b5041e4943bdf2a5c02a19ef4960dcf45e72cde1c", ":getblockcount, getbestblockhash", "s7R4nG3R7H1nGZ"],
            ["strangedude4", "990c895760a70df83949e8278665e19a$8f0906f20431ff24cb9e7f5b5041e4943bdf2a5c02a19ef4960dcf45e72cde1c", ":getblockcount", "s7R4nG3R7H1nGZ"],
            # Testing the same permission twice
            ["strangedude5", "d12c6e962d47a454f962eb41225e6ec8$2dd39635b155536d3c1a2e95d05feff87d5ba55f2d5ff975e6e997a836b717c9", ":getblockcount,getblockcount", "s7R4nG3R7H1nGZ"]
        ]
        # These commands shouldn't be allowed for any user to test failures
        self.never_allowed = ["getnetworkinfo"]
        with open(self.nodes[0].datadir_path / "bitcoin.conf", "a", encoding="utf8") as f:
            f.write("\nrpcwhitelistdefault=0\n")
            for user in self.users:
                f.write("rpcauth=" + user[0] + ":" + user[1] + "\n")
                f.write("rpcwhitelist=" + user[0] + ":" + user[2] + "\n")
            # Special cases
            for strangedude in self.strange_users:
                f.write("rpcauth=" + strangedude[0] + ":" + strangedude[1] + "\n")
                f.write("rpcwhitelist=" + strangedude[0] + strangedude[2] + "\n")
        self.restart_node(0)

        for user in self.users:
            # Normalize the comma-separated whitelist: strip whitespace and drop
            # empty entries (e.g. from a trailing comma).
            # BUG FIX: the previous pop-while-incrementing loop skipped the
            # element following each removal, so consecutive empty entries
            # were left in the list.
            permissions = [p for p in user[2].replace(" ", "").split(",") if p]
            for permission in permissions:
                self.log.info("[" + user[0] + "]: Testing a permitted permission (" + permission + ")")
                assert_equal(200, METHOD_NAME(self.nodes[0], user, permission).status)
            for permission in self.never_allowed:
                self.log.info("[" + user[0] + "]: Testing a non permitted permission (" + permission + ")")
                assert_equal(403, METHOD_NAME(self.nodes[0], user, permission).status)
        # Now test the strange users
        for permission in self.never_allowed:
            self.log.info("Strange test 1")
            assert_equal(403, METHOD_NAME(self.nodes[0], self.strange_users[0], permission).status)
        for permission in self.never_allowed:
            self.log.info("Strange test 2")
            assert_equal(403, METHOD_NAME(self.nodes[0], self.strange_users[1], permission).status)
        self.log.info("Strange test 3")
        assert_equal(200, METHOD_NAME(self.nodes[0], self.strange_users[2], "getblockcount").status)
        self.log.info("Strange test 4")
        assert_equal(403, METHOD_NAME(self.nodes[0], self.strange_users[3], "getbestblockhash").status)
        self.log.info("Strange test 5")
        assert_equal(200, METHOD_NAME(self.nodes[0], self.strange_users[4], "getblockcount").status)
if __name__ == "__main__":
    # Allow running this functional test directly from the command line.
    RPCWhitelistTest().main()
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import time
import unittest
from shutil import rmtree
from tempfile import mkdtemp
import mozunit
from mozbuild import artifact_cache
from mozbuild.artifact_cache import ArtifactCache
# Canned URL -> body fixtures served by FakeSession. Body sizes are chosen so
# that several downloads together exceed the test-shrunk cache size limit,
# while 'larger' alone exceeds it.
CONTENTS = {
    "http://server/foo": b"foo",
    "http://server/bar": b"bar" * 400,
    "http://server/qux": b"qux" * 400,
    "http://server/fuga": b"fuga" * 300,
    "http://server/hoge": b"hoge" * 300,
    "http://server/larger": b"larger" * 3000,
}
class FakeResponse(object):
    """Minimal stand-in for a requests response wrapping a fixed byte string."""

    def __init__(self, content):
        self._content = content

    @property
    def headers(self):
        """Only the Content-length header is needed by the download code."""
        return {"Content-length": str(len(self._content))}

    def iter_content(self, chunk_size):
        """Yield zero-copy memoryview chunks of at most *chunk_size* bytes."""
        view = memoryview(self._content)
        for start in range(0, len(view), chunk_size):
            yield view[start:start + chunk_size]

    def raise_for_status(self):
        """Fixtures never represent an HTTP error."""
        pass

    def close(self):
        """Nothing to release."""
        pass
class FakeSession(object):
    """Minimal stand-in for requests.Session, serving canned CONTENTS."""

    def get(self, url, stream=True):
        # The artifact downloader always streams; enforce that assumption here.
        assert stream is True
        return FakeResponse(CONTENTS[url])
class TestArtifactCache(unittest.TestCase):
    """Exercise ArtifactCache's size-bounded, LRU-style eviction across sessions."""

    def setUp(self):
        # Shrink the cache limits so eviction triggers with tiny fixtures,
        # restoring the real values in tearDown.
        self.min_cached_artifacts = artifact_cache.MIN_CACHED_ARTIFACTS
        self.max_cached_artifacts_size = artifact_cache.MAX_CACHED_ARTIFACTS_SIZE
        artifact_cache.MIN_CACHED_ARTIFACTS = 2
        artifact_cache.MAX_CACHED_ARTIFACTS_SIZE = 4096

        # Patch os.utime so every download gets a distinct, predictable mtime
        # (eviction order depends on file timestamps).
        self._real_utime = os.utime
        os.utime = self.utime
        self.timestamp = time.time() - 86400

        self.tmpdir = mkdtemp()

    def tearDown(self):
        rmtree(self.tmpdir)
        artifact_cache.MIN_CACHED_ARTIFACTS = self.min_cached_artifacts
        artifact_cache.MAX_CACHED_ARTIFACTS_SIZE = self.max_cached_artifacts_size
        os.utime = self._real_utime

    def utime(self, path, times):
        """os.utime replacement giving each touched file a strictly increasing mtime."""
        if times is None:
            # Ensure all downloaded files have a different timestamp
            times = (self.timestamp, self.timestamp)
            self.timestamp += 2
        self._real_utime(path, times)

    def METHOD_NAME(self):
        """List cache dir contents, ignoring the marker file the cache creates."""
        return [p for p in os.listdir(self.tmpdir) if p != ".metadata_never_index"]

    def test_artifact_cache_persistence(self):
        cache = ArtifactCache(self.tmpdir)
        cache._download_manager.session = FakeSession()

        path = cache.fetch("http://server/foo")
        expected = [os.path.basename(path)]
        self.assertEqual(self.METHOD_NAME(), expected)

        path = cache.fetch("http://server/bar")
        expected.append(os.path.basename(path))
        self.assertEqual(sorted(self.METHOD_NAME()), sorted(expected))

        # We're downloading more than the cache allows us, but since it's all
        # in the same session, no purge happens.
        path = cache.fetch("http://server/qux")
        expected.append(os.path.basename(path))
        self.assertEqual(sorted(self.METHOD_NAME()), sorted(expected))

        path = cache.fetch("http://server/fuga")
        expected.append(os.path.basename(path))
        self.assertEqual(sorted(self.METHOD_NAME()), sorted(expected))

        cache = ArtifactCache(self.tmpdir)
        cache._download_manager.session = FakeSession()

        # Downloading a new file in a new session purges the oldest files in
        # the cache.
        path = cache.fetch("http://server/hoge")
        expected.append(os.path.basename(path))
        expected = expected[2:]
        self.assertEqual(sorted(self.METHOD_NAME()), sorted(expected))

        # Downloading a file already in the cache leaves the cache untouched
        cache = ArtifactCache(self.tmpdir)
        cache._download_manager.session = FakeSession()
        path = cache.fetch("http://server/qux")
        self.assertEqual(sorted(self.METHOD_NAME()), sorted(expected))

        # bar was purged earlier, re-downloading it should purge the oldest
        # downloaded file, which at this point would be qux, but we also
        # re-downloaded it in the mean time, so the next one (fuga) should be
        # the purged one.
        cache = ArtifactCache(self.tmpdir)
        cache._download_manager.session = FakeSession()
        path = cache.fetch("http://server/bar")
        expected.append(os.path.basename(path))
        expected = [p for p in expected if "fuga" not in p]
        self.assertEqual(sorted(self.METHOD_NAME()), sorted(expected))

        # Downloading one file larger than the cache size should still leave
        # MIN_CACHED_ARTIFACTS files.
        cache = ArtifactCache(self.tmpdir)
        cache._download_manager.session = FakeSession()
        path = cache.fetch("http://server/larger")
        expected.append(os.path.basename(path))
        expected = expected[-2:]
        self.assertEqual(sorted(self.METHOD_NAME()), sorted(expected))
if __name__ == "__main__":
    # Run through mozunit so the test integrates with Mozilla's harness.
    mozunit.main()
import io
from pavilion import arguments
from pavilion import commands
from pavilion import plugins
from pavilion.status_file import STATES
from pavilion.unittest import PavTestCase
class RunCmdTests(PavTestCase):
def setUp(self):
    """Initialize Pavilion plugins and silence the run command's output."""
    plugins.initialize_plugins(self.pav_cfg)
    run_cmd = commands.get_command('run')
    run_cmd.silence()
def METHOD_NAME(self):
    """Smoke test: `pav run` with two hello_world tests should return success."""
    arg_parser = arguments.get_parser()

    args = arg_parser.parse_args([
        'run',
        '-H', 'this',
        'hello_world.world',
        'hello_world.narf'
    ])

    run_cmd = commands.get_command(args.command_name)
    self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
def test_verbosity(self):
    """Run at every supported verbosity level to verify none of them explode."""
    arg_parser = arguments.get_parser()
    run_cmd = commands.get_command('run')

    for arg_set in [
            ('run', '-H', 'this', 'hello_world'),
            ('run', '-v', 'QUIET', '-H', 'this', 'hello_world'),
            ('run', '-v', 'DYNAMIC', '-H', 'this', 'hello_world'),
            ('run', '-v', 'HIGH', '-H', 'this', 'hello_world'),
            ('run', '-v', 'MAX', '-H', 'this', 'hello_world'),]:
        args = arg_parser.parse_args(arg_set)
        self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
def test_multi_build(self):
    """Make sure we can build multiple simultaneous builds on
    both the front-end and the nodes."""

    arg_parser = arguments.get_parser()
    args = arg_parser.parse_args([
        'run',
        '-H', 'this',
        'build_parallel'
    ])

    run_cmd = commands.get_command(args.command_name)
    run_ret = run_cmd.run(self.pav_cfg, args)

    run_cmd.outfile.seek(0)
    self.assertEqual(run_ret, 0, msg=run_cmd.outfile.read())

    for test in run_cmd.last_tests:
        test.wait(timeout=10)

    # Make sure we actually built separate builds
    builds = [test.builder for test in run_cmd.last_tests]
    build_names = set([b.name for b in builds])
    self.assertEqual(len(build_names), 4)

    for test in run_cmd.last_tests:
        if test.skipped:
            continue
        self.assertEqual(test.results['result'], 'PASS',
                         msg='Test {} status: {}'
                             .format(test.id, test.status.current()))
def test_multi_build_fail(self):
    """Check that failing parallel builds abort the series and mark every
    test complete with a BUILD_FAILED or ABORTED state."""

    arg_parser = arguments.get_parser()
    args = arg_parser.parse_args([
        'run',
        '-H', 'this',
        'build_parallel_fail'
    ])

    run_cmd = commands.get_command(args.command_name)
    # The run command itself must report failure here.
    self.assertNotEqual(run_cmd.run(self.pav_cfg, args), 0)

    # Make sure we actually built separate builds
    builds = [test.builder for test in run_cmd.last_tests]
    build_names = set([b.name for b in builds])
    self.assertEqual(len(build_names), 4)

    run_cmd.last_series.wait()
    statuses = set([test.status.current().state for test in run_cmd.last_tests])
    self.assertEqual(statuses, {STATES.ABORTED, STATES.BUILD_FAILED})

    self.assertTrue(all([test.complete for test in
                         run_cmd.last_tests]))
def test_build_parallel_lots(self):
    """Make sure building works beyond the parallel building limit."""

    arg_parser = arguments.get_parser()
    args = arg_parser.parse_args([
        'run',
        '-H', 'this',
        'build_parallel_lots'
    ])

    run_cmd = commands.get_command(args.command_name)
    run_ret = run_cmd.run(self.pav_cfg, args)

    run_cmd.outfile.seek(0)
    self.assertEqual(run_ret, 0, msg=run_cmd.outfile.read())

    for test in run_cmd.last_tests:
        test.wait(timeout=10)

    # Make sure we actually built separate builds (one per test).
    builds = [test.builder for test in run_cmd.last_tests]
    build_names = set([b.name for b in builds])
    self.assertEqual(len(build_names), 8)

    for test in run_cmd.last_tests:
        self.assertEqual(test.results['result'], 'PASS',
                         msg='Test {} status: {}'
                             .format(test.id, test.status.current()))
    def test_run_status(self):
        """Tests run command with status flag."""
        arg_parser = arguments.get_parser()
        args = arg_parser.parse_args([
            'run',
            '-s',
            'hello_world',
        ])
        run_cmd = commands.get_command(args.command_name)
        # With '-s' the run command should still exit successfully.
        self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
    def test_no_sched(self):
        """Check that we get a reasonable error for a non-available
        scheduler."""
        arg_parser = arguments.get_parser()
        args = arg_parser.parse_args([
            'run', 'not_available'
        ])
        run_cmd = commands.get_command(args.command_name)
        # A non-zero return code signals the scheduler error to the user.
        self.assertNotEqual(run_cmd.run(self.pav_cfg, args), 0)
    def test_run_repeat(self):
        """Check that run repeat functionality works as expected."""
        # Check with repeat flag.
        arg_parser = arguments.get_parser()
        args = arg_parser.parse_args([
            'run', '--repeat', '3', 'hello_world.hello'
        ])
        run_cmd = commands.get_command(args.command_name)
        self.assertEqual(run_cmd.run(self.pav_cfg, args), 0, msg=run_cmd.clear_output())
        self.assertEqual(len(run_cmd.last_tests), 3)
        # Check with * notation (suffix form).
        args = arg_parser.parse_args([
            'run', 'hello_world.hello*5'
        ])
        self.assertEqual(run_cmd.run(self.pav_cfg, args), 0, msg=run_cmd.clear_output())
        self.assertEqual(len(run_cmd.last_tests), 5)
        # Check with * notation (prefix form).
        args = arg_parser.parse_args([
            'run', '5*hello_world.hello'
        ])
        self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
        self.assertEqual(len(run_cmd.last_tests), 5)
        # Check with * notation and --repeat flag; counts multiply (2*2 == 4).
        args = arg_parser.parse_args([
            'run', '--repeat', '2', 'hello_world.hello*2'
        ])
        self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
        self.assertEqual(len(run_cmd.last_tests), 4)
        # Check with invalid arguments (non-numeric repeat count).
        args = arg_parser.parse_args([
            'run', 'hello_world.hello*two'
        ])
        self.assertNotEqual(run_cmd.run(self.pav_cfg, args), 0)
    def test_run_file(self):
        """Check that the -f argument for pav run works. """
        arg_parser = arguments.get_parser()
        # pass a collection name to -f (not an absolute path)
        args = arg_parser.parse_args([
            'run',
            '-f', 'testlist.txt',
        ])
        run_cmd = commands.get_command(args.command_name)
        self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
import json
import os
from unittest.mock import patch
import graphene
import pytest
from .....graphql.tests.utils import get_graphql_content, get_multipart_request_body
from .....product import ProductMediaTypes
from .....product.error_codes import ProductErrorCode
from .....product.tests.utils import create_image, create_zip_file_with_image_ext
PRODUCT_MEDIA_CREATE_QUERY = """
mutation createProductMedia(
$product: ID!,
$image: Upload,
$mediaUrl: String,
$alt: String
) {
productMediaCreate(input: {
product: $product,
mediaUrl: $mediaUrl,
alt: $alt,
image: $image
}) {
product {
media {
url
alt
type
oembedData
}
}
errors {
code
field
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.product_media_created")
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_product_media_create_mutation(
    product_updated_mock,
    product_media_created,
    monkeypatch,
    staff_api_client,
    product,
    permission_manage_products,
    media_root,
):
    """Uploading an image creates product media and fires both plugin hooks."""
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)
    image_file, image_name = create_image()
    variables = {
        "product": graphene.Node.to_global_id("Product", product.id),
        "alt": "",
        "image": image_name,
    }
    body = get_multipart_request_body(
        PRODUCT_MEDIA_CREATE_QUERY, variables, image_file, image_name
    )
    # when
    response = staff_api_client.post_multipart(body)
    get_graphql_content(response)
    # then
    product.refresh_from_db()
    product_image = product.media.last()
    assert product_image.image.file
    img_name, format = os.path.splitext(image_file._name)
    file_name = product_image.image.name
    # The stored name differs from the uploaded name but keeps stem and suffix.
    assert file_name != image_file._name
    assert file_name.startswith(f"products/{img_name}")
    assert file_name.endswith(format)
    product_updated_mock.assert_called_once_with(product)
    product_media_created.assert_called_once_with(product_image)
def test_product_media_create_mutation_without_file(
    monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
    """Naming an image without sending an actual upload must raise REQUIRED."""
    # given
    product_id = graphene.Node.to_global_id("Product", product.id)
    request_body = get_multipart_request_body(
        PRODUCT_MEDIA_CREATE_QUERY,
        {"product": product_id, "image": "image name"},
        file="",
        file_name="name",
    )
    # when
    response = staff_api_client.post_multipart(
        request_body, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    # then
    first_error = content["data"]["productMediaCreate"]["errors"][0]
    assert first_error["code"] == ProductErrorCode.REQUIRED.name
    assert first_error["field"] == "image"
@pytest.mark.vcr
def METHOD_NAME(
    monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
    """Creating media from a supported URL stores oEmbed data (uses a VCR cassette)."""
    # given
    variables = {
        "product": graphene.Node.to_global_id("Product", product.id),
        "mediaUrl": "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
        "alt": "",
    }
    body = get_multipart_request_body(
        PRODUCT_MEDIA_CREATE_QUERY, variables, file="", file_name="name"
    )
    # when
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    # then
    media = content["data"]["productMediaCreate"]["product"]["media"]
    # Alt text defaults to the video title fetched via oEmbed.
    alt = "Rick Astley - Never Gonna Give You Up (Official Music Video)"
    assert len(media) == 1
    assert media[0]["url"] == "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
    assert media[0]["alt"] == alt
    assert media[0]["type"] == ProductMediaTypes.VIDEO
    oembed_data = json.loads(media[0]["oembedData"])
    assert oembed_data["url"] == "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
    assert oembed_data["type"] == "video"
    assert oembed_data["html"] is not None
    assert oembed_data["thumbnail_url"] == (
        "https://i.ytimg.com/vi/dQw4w9WgXcQ/hqdefault.jpg"
    )
def test_product_media_create_mutation_without_url_or_image(
    monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
    """Omitting both image and mediaUrl must raise REQUIRED on the whole input."""
    # given
    variables = {
        "product": graphene.Node.to_global_id("Product", product.id),
        "alt": "Test Alt Text",
    }
    body = get_multipart_request_body(
        PRODUCT_MEDIA_CREATE_QUERY, variables, file="", file_name="name"
    )
    # when
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    # then
    errors = content["data"]["productMediaCreate"]["errors"]
    assert len(errors) == 1
    assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
    assert errors[0]["field"] == "input"
def test_product_media_create_mutation_with_both_url_and_image(
    monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
    """Providing image and mediaUrl together must raise DUPLICATED_INPUT_ITEM."""
    # given
    image_file, image_name = create_image()
    variables = {
        "product": graphene.Node.to_global_id("Product", product.id),
        "mediaUrl": "https://www.youtube.com/watch?v=SomeVideoID&ab_channel=Test",
        "image": image_name,
        "alt": "Test Alt Text",
    }
    body = get_multipart_request_body(
        PRODUCT_MEDIA_CREATE_QUERY, variables, image_file, image_name
    )
    # when
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    # then
    errors = content["data"]["productMediaCreate"]["errors"]
    assert len(errors) == 1
    assert errors[0]["code"] == ProductErrorCode.DUPLICATED_INPUT_ITEM.name
    assert errors[0]["field"] == "input"
def test_product_media_create_mutation_with_unknown_url(
    monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
    """A URL from an unsupported provider must raise UNSUPPORTED_MEDIA_PROVIDER."""
    # given
    variables = {
        "product": graphene.Node.to_global_id("Product", product.id),
        "mediaUrl": "https://www.videohosting.com/SomeVideoID",
        "alt": "Test Alt Text",
    }
    body = get_multipart_request_body(
        PRODUCT_MEDIA_CREATE_QUERY, variables, file="", file_name="name"
    )
    # when
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    # then
    errors = content["data"]["productMediaCreate"]["errors"]
    assert len(errors) == 1
    assert errors[0]["code"] == ProductErrorCode.UNSUPPORTED_MEDIA_PROVIDER.name
    assert errors[0]["field"] == "mediaUrl"
def test_invalid_product_media_create_mutation(
    staff_api_client, product, permission_manage_products
):
    """A zip file with an image extension must be rejected and create no media."""
    # given
    query = """
    mutation createProductMedia($image: Upload!, $product: ID!) {
        productMediaCreate(input: {image: $image, product: $product}) {
            media {
                id
                url
                sortOrder
            }
            errors {
                field
                message
            }
        }
    }
    """
    # The fixture produces a zip archive whose file name merely looks like an image.
    image_file, image_name = create_zip_file_with_image_ext()
    variables = {
        "product": graphene.Node.to_global_id("Product", product.id),
        "image": image_name,
    }
    body = get_multipart_request_body(query, variables, image_file, image_name)
    # when
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    # then
    assert content["data"]["productMediaCreate"]["errors"] == [
        {"field": "image", "message": "Invalid file type."}
    ]
    product.refresh_from_db()
    assert product.media.count() == 0
# Copyright (C) 2021 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
contains_inanyorder,
has_entries,
has_entry )
from pprint import pformat
from unittest import TestCase
import json
import os
from ycmd.tests.rust import setUpModule, tearDownModule # noqa
from ycmd.tests.rust import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
LocationMatcher,
PollForMessages,
PollForMessagesTimeoutException,
RangeMatcher,
WaitForDiagnosticsToBeReady,
WithRetry )
from ycmd.utils import ReadFile
# Rust source file that all diagnostics tests below operate on.
MAIN_FILEPATH = PathToTestFile( 'common', 'src', 'main.rs' )
# Expected diagnostics keyed by file path; shared by the FileReadyToParse and
# polling tests.
DIAG_MATCHERS_PER_FILE = {
  MAIN_FILEPATH: contains_inanyorder(
    has_entries( {
      'kind': 'ERROR',
      'text':
        'no field `build_` on type `test::Builder`\nunknown field [E0609]',
      'location': LocationMatcher( MAIN_FILEPATH, 14, 13 ),
      'location_extent': RangeMatcher( MAIN_FILEPATH, ( 14, 13 ), ( 14, 19 ) ),
      'ranges': contains_exactly( RangeMatcher( MAIN_FILEPATH,
                                                ( 14, 13 ),
                                                ( 14, 19 ) ) ),
      'fixit_available': False
    } )
  )
}
class DiagnosticsTest( TestCase ):
  """Diagnostics tests for the Rust completer."""

  @WithRetry()
  @SharedYcmd
  def METHOD_NAME( self, app ):
    """/detailed_diagnostic returns the full message at a diagnostic location."""
    filepath = PathToTestFile( 'common', 'src', 'main.rs' )
    contents = ReadFile( filepath )
    # Re-write the file unchanged to bump its mtime and trigger re-analysis.
    with open( filepath, 'w' ) as f:
      f.write( contents )
    event_data = BuildRequest( event_name = 'FileSave',
                               contents = contents,
                               filepath = filepath,
                               filetype = 'rust' )
    app.post_json( '/event_notification', event_data )
    WaitForDiagnosticsToBeReady( app, filepath, contents, 'rust' )
    request_data = BuildRequest( contents = contents,
                                 filepath = filepath,
                                 filetype = 'rust',
                                 line_num = 14,
                                 column_num = 13 )
    results = app.post_json( '/detailed_diagnostic', request_data ).json
    assert_that( results, has_entry(
      'message',
      'no field `build_` on type `test::Builder`\nunknown field' ) )


  @WithRetry()
  @SharedYcmd
  def test_Diagnostics_FileReadyToParse( self, app ):
    """Saving the file eventually yields the expected diagnostics set."""
    filepath = PathToTestFile( 'common', 'src', 'main.rs' )
    contents = ReadFile( filepath )
    with open( filepath, 'w' ) as f:
      f.write( contents )
    event_data = BuildRequest( event_name = 'FileSave',
                               contents = contents,
                               filepath = filepath,
                               filetype = 'rust' )
    app.post_json( '/event_notification', event_data )
    # It can take a while for the diagnostics to be ready.
    results = WaitForDiagnosticsToBeReady( app, filepath, contents, 'rust' )
    print( f'completer response: { pformat( results ) }' )
    assert_that( results, DIAG_MATCHERS_PER_FILE[ filepath ] )


  @SharedYcmd
  def test_Diagnostics_Poll( self, app ):
    """Diagnostics arrive asynchronously via the message poll endpoint."""
    project_dir = PathToTestFile( 'common' )
    filepath = os.path.join( project_dir, 'src', 'main.rs' )
    contents = ReadFile( filepath )
    with open( filepath, 'w' ) as f:
      f.write( contents )
    event_data = BuildRequest( event_name = 'FileSave',
                               contents = contents,
                               filepath = filepath,
                               filetype = 'rust' )
    app.post_json( '/event_notification', event_data )
    # Poll until we receive _all_ the diags asynchronously.
    to_see = sorted( DIAG_MATCHERS_PER_FILE.keys() )
    seen = {}
    try:
      for message in PollForMessages( app,
                                      { 'filepath': filepath,
                                        'contents': contents,
                                        'filetype': 'rust' } ):
        print( f'Message { pformat( message ) }' )
        if 'diagnostics' in message:
          if message[ 'diagnostics' ] == []:
            # Sometimes we get empty diagnostics before the real ones.
            continue
          seen[ message[ 'filepath' ] ] = True
          if message[ 'filepath' ] not in DIAG_MATCHERS_PER_FILE:
            raise AssertionError( 'Received diagnostics for unexpected file '
              f'{ message[ "filepath" ] }. Only expected { to_see }' )
          assert_that( message, has_entries( {
            'diagnostics': DIAG_MATCHERS_PER_FILE[ message[ 'filepath' ] ],
            'filepath': message[ 'filepath' ]
          } ) )
        if sorted( seen.keys() ) == to_see:
          break
      # Eventually PollForMessages will throw a timeout exception and we'll fail
      # if we don't see all of the expected diags.
    except PollForMessagesTimeoutException as e:
      raise AssertionError(
        str( e ) +
        'Timed out waiting for full set of diagnostics. '
        f'Expected to see diags for { json.dumps( to_see, indent = 2 ) }, '
        f'but only saw { json.dumps( sorted( seen.keys() ), indent = 2 ) }.' )
import warnings
from transformers import BitsAndBytesConfig
from ludwig.api_annotations import DeveloperAPI
from ludwig.schema import utils as schema_utils
from ludwig.schema.metadata import LLM_METADATA
from ludwig.schema.metadata.parameter_metadata import convert_metadata_to_json
from ludwig.schema.utils import ludwig_dataclass
# Silence a noisy UserWarning emitted by bitsandbytes' CUDA setup on import.
warnings.filterwarnings(
    action="ignore",
    category=UserWarning,
    module="bitsandbytes.cuda_setup.main",
)
@DeveloperAPI
@ludwig_dataclass
class QuantizationConfig(schema_utils.BaseMarshmallowConfig):
    """Schema for bitsandbytes weight-quantization settings."""

    # 4-bit or 8-bit quantization; drives load_in_4bit / load_in_8bit below.
    bits: int = schema_utils.IntegerOptions(
        options=[4, 8],
        default=4,
        description="The quantization level to apply to weights on load.",
        parameter_metadata=LLM_METADATA["quantization"]["bits"],
    )

    # int8-only knob (see LLM.int8() paper referenced in the description).
    llm_int8_threshold: float = schema_utils.NonNegativeFloat(
        default=6.0,
        description=(
            "This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit "
            "Matrix Multiplication for Transformers at Scale` paper: https://arxiv.org/abs/2208.07339. Any hidden "
            "states value that is above this threshold will be considered an outlier and the operation on those "
            "values will be done in fp16. Values are usually normally distributed, that is, most values are in the "
            "range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently "
            "distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 "
            "quantization works well for values of magnitude ~5, but beyond that, there is a significant performance "
            "penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models "
            "(small models, fine-tuning)."
        ),
    )

    llm_int8_has_fp16_weight: bool = schema_utils.Boolean(
        default=False,
        description=(
            "This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do "
            "not have to be converted back and forth for the backward pass."
        ),
    )

    # 4-bit-only knobs.
    bnb_4bit_compute_dtype: str = schema_utils.StringOptions(
        options=["float32", "float16", "bfloat16"],
        default="float16",
        description=(
            "This sets the computational type which might be different than the input type. For example, inputs "
            "might be fp32, but computation can be set to bf16 for speedups."
        ),
    )

    bnb_4bit_use_double_quant: bool = schema_utils.Boolean(
        default=True,
        description=(
            "This flag is used for nested quantization where the quantization constants from the first quantization "
            "are quantized again."
        ),
    )

    bnb_4bit_quant_type: str = schema_utils.StringOptions(
        options=["fp4", "nf4"],
        default="nf4",
        description="This sets the quantization data type in the bnb.nn.Linear4Bit layers.",
    )

    def to_bitsandbytes(self) -> BitsAndBytesConfig:
        """Translate this schema object into a transformers BitsAndBytesConfig."""
        return BitsAndBytesConfig(
            load_in_4bit=self.bits == 4,
            load_in_8bit=self.bits == 8,
            llm_int8_threshold=self.llm_int8_threshold,
            llm_int8_has_fp16_weight=self.llm_int8_has_fp16_weight,
            bnb_4bit_compute_dtype=self.bnb_4bit_compute_dtype,
            bnb_4bit_use_double_quant=self.bnb_4bit_use_double_quant,
            bnb_4bit_quant_type=self.bnb_4bit_quant_type,
        )
@DeveloperAPI
class QuantizationConfigField(schema_utils.DictMarshmallowField):
    """Marshmallow field wrapping QuantizationConfig; None disables quantization."""

    def __init__(self):
        super().__init__(QuantizationConfig, default_missing=True)

    def METHOD_NAME(self):
        """Return the JSON-schema oneOf describing the null/object alternatives."""
        return {
            "oneOf": [
                {
                    "type": "null",
                    "title": "disabled",
                    "description": "Disable quantization.",
                    "parameter_metadata": convert_metadata_to_json(LLM_METADATA["quantization"]["_oneOf"]["none"]),
                },
                {
                    **schema_utils.unload_jsonschema_from_marshmallow_class(QuantizationConfig),
                    "title": "enabled",
                    "description": "Set quantization options.",
                    "parameter_metadata": convert_metadata_to_json(LLM_METADATA["quantization"]["_oneOf"]["object"]),
                },
            ],
            "title": "quantization",
            "description": "Set quantization options.",
            "parameter_metadata": convert_metadata_to_json(LLM_METADATA["quantization"]["_meta"]),
        }
import re
import sys
import warnings
from urllib.parse import urlparse
import joblib
from googlesearch import search
from newspaper import Article
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split
# Suppress all library warnings (sklearn and newspaper are noisy on import).
warnings.filterwarnings("ignore")
def extractor(url):
    """Extractor function that gets the article body from the URL

    Args:
        url: URL of the article

    Returns:
        article: Article text (lower-cased) wrapped in a single-element list
        article_title: Title of the Article
    """
    article = Article(url)
    try:
        article.download()
        article.parse()
    except Exception:
        # Best-effort fetch: an unreachable/unparseable page leaves the
        # Article object with empty text/title rather than aborting the run.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit.)
        pass
    # Get the article title and convert the body to lower-case.
    article_title = article.title
    article = article.text.lower()
    # Wrapped in a list so it can feed TfidfVectorizer.fit_transform directly.
    article = [article]
    return (article, article_title)
def text_area_extractor(text):
    """Normalize raw textbox input for analysis.

    Lower-cases the text, replaces every character that is not alphanumeric
    or whitespace with a space, strips a trailing CRLF/LFLF pair, and wraps
    the result in a single-element list for the vectorizer.

    Args:
        text: Raw Extracted Text from the News Article

    Returns:
        list[str]: One-element list holding the cleaned text
    """
    cleaned = re.sub(r'[^a-zA-Z0-9\s]', ' ', text.lower())
    cleaned = re.sub("(\\r|\r|\n)\\n$", " ", cleaned)
    return [cleaned]
def METHOD_NAME(title, url):
    """
    Function to perform a Google Search with the specified title and URL

    Args:
        title: Title of the Article
        url: URL of the specified article

    Returns:
        search_urls: Similar News Articles found over the Web
        source_sites: Hostname of the Articles founder over the Web
    """
    target = url
    domain = urlparse(target).hostname
    search_urls = []
    source_sites = []
    # Take the first few organic results, skipping YouTube links and any
    # result from the article's own domain.
    for i in search(title, tld = "com", num = 10, start = 1, stop = 6):
        if "youtube" not in i and domain not in i:
            source_sites.append(urlparse(i).hostname)
            search_urls.append(i)
    return search_urls, source_sites
def similarity(url_list, article):
    """
    Function to check the similarity of the News Article through Cosine Similarity

    Args:
        url_list: List of the URLs similar to the news article
        article: Preprocessed article which would be vectorized

    Returns:
        cosine_cleaned: Cosine Similarity Scores of each URL passed
        average_score: Average value of the cosine similarity scores fetched
    """
    article = article
    sim_tfv = TfidfVectorizer(stop_words ="english")
    # Fit the vectorizer on the source article only; fetched articles are
    # projected into the same vocabulary via transform() below.
    sim_transform1 = sim_tfv.fit_transform(article)
    cosine = []
    cosine_cleaned = []
    cosine_average = 0
    count = 0
    for i in url_list:
        test_article, test_title = extractor(i)
        test_article = [test_article]
        sim_transform2 = sim_tfv.transform(test_article[0])
        # cosine_similarity returns a 1x1 array; scores kept as percentages.
        score = cosine_similarity(sim_transform1, sim_transform2)
        cosine.append(score*100)
        count+=1
    # Strip the numpy-array bracket formatting to get plain numeric strings.
    for i in cosine:
        x = str(i).replace('[','').replace(']','')
        cosine_cleaned.append(x)
    # Zero-score articles are excluded from the average by decrementing count.
    # NOTE(review): if every score is 0 (or url_list is empty), count reaches 0
    # and the division below raises ZeroDivisionError — confirm intended.
    for i in cosine:
        if i !=0:
            cosine_average = cosine_average + i
        else:
            count-=1
    average_score = cosine_average/count
    average_score = str(average_score).replace('[','').replace(']','')
    average_score = float(average_score)
    return cosine_cleaned, average_score
def handlelink(article_link):
    """Classification function to take the article link and predict the similar news articles

    Args:
        article_link: URL of the article

    Returns:
        pred: Predicted label from the machine learning model
        article_title: Title of the Article
        article: Article fetched from the URL
        url: The original article URL
    """
    # Pre-trained PassiveAggressiveClassifier and its TF-IDF vectorizer,
    # serialized with joblib.
    job_pac = joblib.load('models/pac.pkl')
    job_vec = joblib.load('models/tfv.pkl')
    url = (article_link)
    article, article_title = extractor(article_link)
    pred = job_pac.predict(job_vec.transform(article))
    return pred, article_title, article, url
def similarNews(url):
    """
    Driver function to return a dictionary with all the similar news and their similarity score

    Args:
        url: URL of the article

    Returns:
        dictionary: Dictionary mapping each similar article URL to its similarity score
    """
    prediction, article_title, article, url = handlelink(article_link=url)
    url_list, sitename = METHOD_NAME(article_title, url)
    similarity_score, avgScore = similarity(url_list, article)
    dictionary = dict(zip(url_list, similarity_score))
    return dictionary
if __name__ == "__main__":
    # CLI entry point: `python script.py <article-url>`.
    url = sys.argv[1]
    # Use a distinct name for the result so the similarNews function is not
    # shadowed by its own return value.
    similar_news = similarNews(url)
    print("{:<10} {:<10}".format('News Link', 'Similarity Score'))
    for key, value in similar_news.items():
        print("{:<10} {:<10}".format(key, value))
import boto3
import io
import pytest
import time
import zipfile
from botocore.exceptions import ClientError
from moto import settings, mock_iam
from uuid import uuid4
# Default AWS region used by the Lambda test helpers below.
_lambda_region = "us-west-2"
def _process_lambda(func_str):
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
zip_file.writestr("lambda_function.py", func_str)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def get_test_zip_file1():
    """Zip of a trivial handler that logs a marker line and echoes the event."""
    return _process_lambda(
        """
def lambda_handler(event, context):
    print("custom log event")
    return event
"""
    )
def get_test_zip_file2():
    """Zip of a handler that queries an EC2 volume through a mocked endpoint."""
    # Point boto3 at the moto server when running in server mode, otherwise at
    # the (intercepted) real-looking EC2 endpoint.
    base_url = (
        "motoserver:5000"
        if settings.TEST_SERVER_MODE
        else "ec2.us-west-2.amazonaws.com"
    )
    func_str = f"""
import boto3

def lambda_handler(event, context):
    ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url='http://{base_url}')

    volume_id = event.get('volume_id')
    vol = ec2.Volume(volume_id)

    return {{'id': vol.id, 'state': vol.state, 'size': vol.size}}
"""
    return _process_lambda(func_str)
def get_lambda_using_environment_port():
    """Zip of a handler that reaches back to moto via MOTO_HOST/MOTO_PORT env vars."""
    func_str = """
import boto3
import os

def lambda_handler(event, context):
    base_url = os.environ.get("MOTO_HOST")
    port = os.environ.get("MOTO_PORT")
    url = base_url + ":" + port
    conn = boto3.client('lambda', region_name='us-west-2', endpoint_url=url)

    full_url = os.environ["MOTO_HTTP_ENDPOINT"]

    functions = conn.list_functions()["Functions"]
    return {'functions': functions, 'host': full_url}
"""
    return _process_lambda(func_str)
def get_lambda_using_network_mode():
    """Zip of a handler that reaches moto on localhost via MOTO_PORT (host-network mode)."""
    func_str = """
import boto3
import os

def lambda_handler(event, context):
    port = os.environ.get("MOTO_PORT")
    url = "http://localhost:" + port
    conn = boto3.client('lambda', region_name='us-west-2', endpoint_url=url)

    functions = conn.list_functions()["Functions"]
    return {'response': functions}
"""
    return _process_lambda(func_str)
def get_test_zip_file3():
    """Zip of a handler that logs the record count plus a success marker."""
    pfunc = """
def lambda_handler(event, context):
    print("Nr_of_records("+str(len(event['Records']))+")")
    print("get_test_zip_file3 success")
    return event
"""
    return _process_lambda(pfunc)
def get_test_zip_file_error():
    """Zip of a handler that always raises, for failure-path tests."""
    return _process_lambda(
        """
def lambda_handler(event, context):
    raise Exception('I failed!')
"""
    )
def get_test_zip_largeresponse():
    """Zip of a handler that returns an oversized body (a million strings)."""
    pfunc = """
def lambda_handler(event, context):
    x = ["xxx" for x in range(10 ** 6)]
    return {"statusCode": 200, "body": x}
"""
    return _process_lambda(pfunc)
def METHOD_NAME():
    """Build an in-memory zip holding a handler plus a helper module it imports.

    Returns the archive bytes. The handler appends the helper's return value
    to ``event["msg"]``, exercising multi-file Lambda packages.
    """
    pfunc = """
from utilities import util_function

def lambda_handler(event, context):
    x = util_function()
    event["msg"] = event["msg"] + x
    return event
"""
    ufunc = """
def util_function():
    return "stuff"
"""
    # A single write-mode ZipFile is enough for both members; the original
    # closed and re-opened the same buffer in append mode for no benefit.
    zip_output = io.BytesIO()
    with zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED) as zip_file:
        zip_file.writestr("lambda_function.py", pfunc)
        zip_file.writestr("utilities.py", ufunc)
    return zip_output.getvalue()
def get_test_zip_file_print_event():
    """Zip of a handler that prints the event followed by a sentinel line."""
    pfunc = """
def lambda_handler(event, context):
    print(event)
    print("FINISHED_PRINTING_EVENT")
    return event
"""
    return _process_lambda(pfunc)
def create_invalid_lambda(role):
    """Attempt to create a function with *role* and return the raised ClientError.

    Used by tests that verify role-validation failures; the pytest.raises
    context (`err`) is returned for the caller to inspect.
    """
    conn = boto3.client("lambda", _lambda_region)
    zip_content = get_test_zip_file1()
    function_name = str(uuid4())[0:6]
    with pytest.raises(ClientError) as err:
        conn.create_function(
            FunctionName=function_name,
            Runtime="python3.11",
            Role=role,
            Handler="lambda_function.handler",
            Code={"ZipFile": zip_content},
            Description="test lambda function",
            Timeout=3,
            MemorySize=128,
            Publish=True,
        )
    return err
def get_role_name():
    """Return the ARN of the shared 'my-role' IAM role, creating it if needed.

    Runs inside mock_iam so no real AWS calls are made. Get and create can
    both race under parallel test runs, so each is retried until one succeeds.
    """
    with mock_iam():
        iam = boto3.client("iam", region_name=_lambda_region)
        while True:
            try:
                return iam.get_role(RoleName="my-role")["Role"]["Arn"]
            except ClientError:
                try:
                    return iam.create_role(
                        RoleName="my-role",
                        AssumeRolePolicyDocument="some policy",
                        Path="/my-path/",
                    )["Role"]["Arn"]
                except ClientError:
                    # Another worker created it first; loop back to get_role.
                    pass
def wait_for_log_msg(expected_msg, log_group, wait_time=30):
    """Poll CloudWatch Logs until *expected_msg* appears in *log_group*.

    Returns a (found, messages) tuple where `messages` is the set of all log
    lines seen, whether or not the expected one was among them.
    """
    logs_conn = boto3.client("logs", region_name="us-east-1")
    received_messages = []
    start = time.time()
    while (time.time() - start) < wait_time:
        try:
            result = logs_conn.describe_log_streams(logGroupName=log_group)
            log_streams = result.get("logStreams")
        except ClientError:
            log_streams = None  # LogGroupName does not yet exist
        if not log_streams:
            time.sleep(1)
            continue
        for log_stream in log_streams:
            result = logs_conn.get_log_events(
                logGroupName=log_group, logStreamName=log_stream["logStreamName"]
            )
            received_messages.extend(
                [event["message"] for event in result.get("events")]
            )
        for line in received_messages:
            if expected_msg in line:
                return True, set(received_messages)
        time.sleep(1)
    return False, set(received_messages)
import re
from a2ml.tasks_queue.tasks_api import *
from a2ml.api.utils.context import Context
from tests.tasks_queue.base_test import BaseTest
from tests.vcr_helper import vcr
from unittest.mock import ANY
# For record cassetes run
# make docker-up
# set credentials in base_test.build_context
# AWS_ACCESS_KEY_ID=secret AWS_SECRET_ACCESS_KEY=strongsecret S3_ENDPOINT_URL=http://localhost:9000 pytest tests/tasks_queue/test_tasks_api_auger.py
# Revert credentials change back in base_test.build_context after recording
class TestTasksApiAuger(BaseTest):
    """End-to-end task tests for the Auger provider, replayed from VCR cassettes."""

    def assert_result(self, res, expected_result, expected_data, no_provider_in_result=False):
        """Unwrap a task response (optionally keyed by provider) and check it."""
        assert isinstance(res, dict)
        if no_provider_in_result:
            response = res['response']
        else:
            response = res['response']['auger']
        assert response['result'] == expected_result, response['data']
        assert response['data'] == expected_data

    @vcr.use_cassette('auger/new/valid.yaml')
    def test_new_success(self):
        params = self.params('auger', 'new-project-name')
        res = new_project_task.apply(params).result
        self.assert_result(res, True, {'created': 'new-project-name'})

    @vcr.use_cassette('auger/import/valid.yaml')
    def test_import_valid(self, monkeypatch):
        # Avoid the real cluster start during data import.
        monkeypatch.setattr('a2ml.api.auger.impl.cloud.project.AugerProjectApi.start', lambda self: None)
        params = self.params('auger')
        res = import_data_task.apply(params).result
        self.assert_result(res, True, {'created': 'iris.csv'})

    #TODO: fix spec
    # @vcr.use_cassette('auger/train/valid.yaml')
    # def test_train_valid(self):
    #     params = self.params('auger')
    #     res = train_task.apply(params).result
    #     self.assert_result(res, True, {'experiment_name': ANY, 'session_id': ANY})

    @vcr.use_cassette('auger/evaluate/valid.yaml')
    def test_evaluate_valid(self):
        params = self.params('auger', '29654979bf8a1877')
        res = evaluate_task.apply(params).result
        self.assert_result(res, True,
            {'leaderboard': ANY, 'run_id': '29654979bf8a1877',
             'status': 'completed', 'provider_status': 'completed', 'trials_count': 19})

    @vcr.use_cassette('auger/deploy/valid.yaml')
    def test_deploy_valid(self, monkeypatch):
        monkeypatch.setattr('a2ml.api.auger.impl.cloud.project.AugerProjectApi.start', lambda self: None)
        params = self.params('auger', '390955D2AB984D7', False, False)
        res = deploy_task.apply(params).result
        self.assert_result(res, True, {'model_id': '390955D2AB984D7'}, no_provider_in_result=True)

    @vcr.use_cassette('auger/project/list_valid.yaml')
    def test_project_list_valid(self):
        params = self.params('auger')
        res = list_projects_task.apply(params).result
        self.assert_result(res, True, {'projects': ANY})

    @vcr.use_cassette('auger/project/delete_valid.yaml')
    def test_project_delete_valid(self):
        params = self.params('auger', 'new-project-name')
        res = delete_project_task.apply(params).result
        self.assert_result(res, True, {'deleted': 'new-project-name'})

    @vcr.use_cassette('auger/dataset/list_valid.yaml')
    def test_dataset_list_valid(self):
        params = self.params('auger')
        res = list_datasets_task.apply(params).result
        self.assert_result(res, True, {'datasets': ANY})

    @vcr.use_cassette('auger/dataset/delete_valid.yaml')
    def test_dataset_delete_valid(self):
        params = self.params('auger', 'iris-13.csv')
        res = delete_dataset_task.apply(params).result
        self.assert_result(res, True, {'deleted': 'iris-13.csv'})

    @vcr.use_cassette('auger/experiment/history_valid.yaml')
    def test_experiment_history_valid(self):
        params = self.params('auger')
        res = history_experiment_task.apply(params).result
        self.assert_result(res, True, {'history': ANY})

    @vcr.use_cassette('auger/experiment/leaderboard_valid.yaml')
    def test_experiment_leaderboard_valid(self):
        params = self.params('auger', 'a6bc4bdb6607e7c2')
        res = leaderboard_experiment_task.apply(params).result
        self.assert_result(res, True, {'leaderboard': ANY, 'run_id': 'a6bc4bdb6607e7c2',
            'status': 'completed', 'provider_status': 'completed', 'trials_count': 34})

    @vcr.use_cassette('auger/experiment/list_valid.yaml')
    def METHOD_NAME(self):
        params = self.params('auger')
        res = list_experiments_task.apply(params).result
        self.assert_result(res, True, {'experiments': ANY})

    @vcr.use_cassette('auger/predict/valid.yaml')
    def test_predict_success(self, monkeypatch):
        monkeypatch.setattr('a2ml.api.auger.impl.cloud.project.AugerProjectApi.start', lambda self: None)
        params = self.params(
            'auger',
            '2B702A5511A44E3',
            'tests/fixtures/iris_for_predict.csv',
        )
        res = predict_model_task.apply(params).result
        self.assert_result(res, True, {'predicted': ANY}, no_provider_in_result=True)

    @vcr.use_cassette('auger/predict/invalid_model.yaml')
    def test_predict_failure_model_status(self, monkeypatch):
        monkeypatch.setattr('a2ml.api.auger.impl.cloud.project.AugerProjectApi.start', lambda self: None)
        params = self.params(
            'auger',
            'BF8BDC3CD21648A',
            'tests/fixtures/iris_for_predict.csv',
        )
        res = predict_model_task.apply(params).result
        # A not-yet-ready pipeline yields a failure message instead of data.
        self.assert_result(res, False, 'Pipeline BF8BDC3CD21648A is not ready...', no_provider_in_result=True)
|
import argparse
import contextlib
import pathlib
import typing as t
import libtmux
import pytest
from libtmux.server import Server
from tmuxp import cli
from tmuxp.cli.import_config import get_teamocil_dir, get_tmuxinator_dir
from tmuxp.cli.load import _reattach, load_plugins
from tmuxp.cli.utils import tmuxp_echo
from tmuxp.config_reader import ConfigReader
from tmuxp.workspace import loader
from tmuxp.workspace.builder import WorkspaceBuilder
from tmuxp.workspace.finders import find_workspace_file
from ..fixtures import utils as test_utils
if t.TYPE_CHECKING:
import _pytest.capture
def test_creates_config_dir_not_exists(tmp_path: pathlib.Path) -> None:
    """cli.startup() creates the configuration directory when missing."""
    target_dir = tmp_path
    cli.startup(target_dir)
    assert target_dir.exists()
@pytest.mark.parametrize(
    "cli_args",
    [
        ["--help"],
        ["-h"],
    ],
)
def test_help(
    cli_args: t.List[str],
    tmp_path: pathlib.Path,
    monkeypatch: pytest.MonkeyPatch,
    capsys: pytest.CaptureFixture,
) -> None:
    """Both help flags print the argparse usage banner."""
    # Pin terminal geometry so argparse wraps the usage text the same way
    # regardless of the terminal the test runs in.
    monkeypatch.setenv("COLUMNS", "100")
    monkeypatch.setenv("LINES", "100")
    # argparse exits after printing help; swallow the SystemExit.
    with contextlib.suppress(SystemExit):
        cli.cli(cli_args)
    captured = capsys.readouterr()
    assert "usage: tmuxp [-h] [--version] [--log-level log-level]" in captured.out
def test_resolve_behavior(
    tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Sanity-check pathlib resolution semantics relative to the cwd."""
    monkeypatch.chdir(tmp_path)
    assert pathlib.Path("../").resolve() == tmp_path.parent
    assert pathlib.Path().resolve() == tmp_path
    assert pathlib.Path("./").resolve() == tmp_path
    assert pathlib.Path(tmp_path).resolve() == tmp_path
def test_get_tmuxinator_dir(monkeypatch: pytest.MonkeyPatch) -> None:
    """get_tmuxinator_dir() expands ``~`` and follows $HOME changes."""
    assert get_tmuxinator_dir() == pathlib.Path("~/.tmuxinator").expanduser()
    # Redirect HOME and confirm the helper tracks it.
    monkeypatch.setenv("HOME", "/moo")
    assert get_tmuxinator_dir() == pathlib.Path("/moo/.tmuxinator/")
    assert str(get_tmuxinator_dir()) == "/moo/.tmuxinator"
    assert get_tmuxinator_dir() == pathlib.Path("~/.tmuxinator/").expanduser()
def test_get_teamocil_dir(monkeypatch: pytest.MonkeyPatch) -> None:
    """get_teamocil_dir() expands ``~`` and follows $HOME changes."""
    assert get_teamocil_dir() == pathlib.Path("~/.teamocil/").expanduser()
    # Redirect HOME and confirm the helper tracks it.
    monkeypatch.setenv("HOME", "/moo")
    assert get_teamocil_dir() == pathlib.Path("/moo/.teamocil/")
    assert str(get_teamocil_dir()) == "/moo/.teamocil"
    assert get_teamocil_dir() == pathlib.Path("~/.teamocil/").expanduser()
def METHOD_NAME(
    tmp_path: pathlib.Path,
    monkeypatch: pytest.MonkeyPatch,
    capsys: pytest.CaptureFixture,
) -> None:
    """find_workspace_file() resolves bare names, relative paths and
    absolute paths against an explicit workspace directory."""
    configdir = tmp_path / "myconfigdir"
    configdir.mkdir()
    user_config_name = "myconfig"
    user_config = configdir / f"{user_config_name}.yaml"
    user_config.touch()
    expect = str(user_config)
    parser = argparse.ArgumentParser()
    parser.add_argument("workspace_file", type=str)
    def config_cmd(workspace_file: str) -> None:
        # Echo the resolved path so it can be captured and asserted on.
        tmuxp_echo(find_workspace_file(workspace_file, workspace_dir=configdir))
    def check_cmd(config_arg) -> "_pytest.capture.CaptureResult":
        # Run the arg through argparse exactly as the CLI would.
        args = parser.parse_args([config_arg])
        config_cmd(workspace_file=args.workspace_file)
        return capsys.readouterr()
    monkeypatch.chdir(configdir)
    assert expect in check_cmd("myconfig").out
    assert expect in check_cmd("myconfig.yaml").out
    assert expect in check_cmd("./myconfig.yaml").out
    assert str(user_config) in check_cmd(str(configdir / "myconfig.yaml")).out
    # A non-existent workspace file raises instead of printing a path.
    with pytest.raises(FileNotFoundError):
        assert "FileNotFoundError" in check_cmd(".tmuxp.json").out
def test_reattach_plugins(
    monkeypatch_plugin_test_packages: None, server: "Server"
) -> None:
    """Reattaching a built session invokes the reattach plugin hook."""
    plugin_yaml = test_utils.read_workspace_file("workspace/builder/plugin_r.yaml")
    session_config = loader.expand(
        ConfigReader._load(format="yaml", content=plugin_yaml)
    )
    # Build the session detached, with plugins loaded from the workspace file.
    builder = WorkspaceBuilder(
        session_config=session_config,
        plugins=load_plugins(session_config),
        server=server,
    )
    builder.build()
    with contextlib.suppress(libtmux.exc.LibTmuxException):
        _reattach(builder)
    assert builder.session is not None
    # The plugin renames the session on reattach; verify via tmux itself.
    proc = builder.session.cmd("display-message", "-p", "'#S'")
    assert proc.stdout[0] == "'plugin_test_r'"
298,665 | validate | from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Documentation, PortDoc, Form, FormGroup, \
FormField, FormComponent
from tracardi.service.plugin.runner import ActionRunner
from tracardi.service.wf.domain.edge import Edge
from .model.config import Config
from tracardi.service.plugin.domain.result import Result
def METHOD_NAME(config: dict) -> Config:
    """Validate a raw plugin config dict and return a typed Config object."""
    return Config(**config)
class PayloadMemoryCollector(ActionRunner):
    """Workflow action that accumulates incoming payloads in the shared
    workflow memory, either as a list or as a dict keyed by edge name."""
    # Parsed plugin configuration; populated in set_up().
    config: Config
    async def set_up(self, init):
        self.config = METHOD_NAME(init)
    async def run(self, payload: dict, in_edge: Edge = None) -> Result:
        # If the memory slot already exists it must match the configured type.
        if self.config.name in self.memory:
            if self.config.type == 'list' and not isinstance(self.memory[self.config.name], list):
                raise ValueError(f"Memory has key {self.config.name} and it is not a list.")
            elif self.config.type == 'dict' and not isinstance(self.memory[self.config.name], dict):
                raise ValueError(f"Memory has key {self.config.name} and it is not a dictionary.")
        # Lazily create the collection slot on first use.
        if self.config.name not in self.memory:
            if self.config.type == 'dict':
                self.memory[self.config.name] = {}
            elif self.config.type == 'list':
                self.memory[self.config.name] = []
        if self.config.type == 'dict':
            # Dict mode keys entries by the incoming edge's name (spaces
            # become dashes). NOTE(review): in_edge defaults to None; calling
            # has_name() on a missing edge would raise AttributeError —
            # confirm the workflow engine always supplies the edge.
            if not in_edge.has_name():
                raise ValueError(f"Edge id {in_edge.id} has no name. Could not create object with payload value. "
                                 f"Edge name is used to save payload value.")
            self.memory[self.config.name][in_edge.data.name.replace(" ", "-")] = payload
        elif self.config.type == 'list':
            self.memory[self.config.name].append(payload)
        # The payload always passes through unchanged.
        return Result(port="payload", value=payload)
def register() -> Plugin:
    """Return the registration metadata (spec, config form, docs) consumed
    by the Tracardi plugin loader."""
    return Plugin(
        start=False,
        spec=Spec(
            module=__name__,
            className='PayloadMemoryCollector',
            inputs=["payload"],
            outputs=["payload"],
            version='0.7.1',
            license="MIT",
            author="Risto Kowaczewski",
            # Default configuration: collect into a list under no name.
            init={
                "name": None,
                "type": "list"
            },
            manual="memory/payload_memory_collector",
            form=Form(
                groups=[
                    FormGroup(
                        name="Payload memory collector configuration",
                        fields=[
                            FormField(
                                id="name",
                                name="Name of collection",
                                description="Please provide the name under which the collection will be saved.",
                                component=FormComponent(type="text", props={"label": "Name"})
                            ),
                            FormField(
                                id="type",
                                name="Type of collection",
                                description="Select type of collection. Type of `Dictionary` requires named connections "
                                            "in the workflow.",
                                component=FormComponent(type="select", props={"label": "Name", "items": {
                                    "list": "List",
                                    "dict": "Dictionary"
                                }})
                            )
                        ]
                    )
                ]
            )
        ),
        metadata=MetaData(
            name='Payload collector',
            desc='Collects input payloads in the workflow memory object.',
            tags=['memory', 'join'],
            icon='array',
            group=["Operations"],
            purpose=['collection', 'segmentation'],
            documentation=Documentation(
                inputs={
                    "payload": PortDoc(desc="This port takes payload object.")
                },
                outputs={
                    "payload": PortDoc(desc="This port returns input payload.")
                }
            )
        )
    )
298,666 | is experimental | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2022 Canonical Ltd.
# 2023 Scarlett Moore <sgmoore@kde.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Generic KDE NEON extension to support core22 and onwards."""
import dataclasses
import functools
import re
from typing import Any, Dict, List, Optional, Tuple
from overrides import overrides
from .extension import Extension, get_extensions_data_dir, prepend_to_env
# Default SDK build snap per supported base, used when no part declares
# its own kf5 sdk build-snap.
_SDK_SNAP = {"core22": "kf5-5-108-qt-5-15-10-core22-sdk"}
@dataclasses.dataclass
class ExtensionInfo:
    """Content/SDK build information."""
    # Extra argument(s) passed to CMake so it can locate the SDK snap.
    cmake_args: str
@dataclasses.dataclass
class KDESnaps:
    """A structure of KDE related snaps."""
    # Name of the SDK (build-time) snap.
    sdk: str
    # Name of the content (runtime) snap, derived from the sdk name.
    content: str
    # True when the built-in default SDK snap is used rather than one the
    # user listed in build-snaps.
    builtin: bool = True
class KDENeon(Extension):
    r"""The KDE Neon extension.
    This extension makes it easy to assemble KDE based applications
    using the Neon stack.
    It configures each application with the following plugs:
    \b
    - Common Icon Themes.
    - Common Sound Themes.
    - The Qt5 and KDE Frameworks runtime libraries and utilities.
    For easier desktop integration, it also configures each application
    entry with these additional plugs:
    \b
    - desktop (https://snapcraft.io/docs/desktop-interface)
    - desktop-legacy (https://snapcraft.io/docs/desktop-legacy-interface)
    - opengl (https://snapcraft.io/docs/opengl-interface)
    - wayland (https://snapcraft.io/docs/wayland-interface)
    - x11 (https://snapcraft.io/docs/x11-interface)
    """
    @staticmethod
    @overrides
    def get_supported_bases() -> Tuple[str, ...]:
        # Only core22 is supported by this generation of the extension.
        return ("core22",)
    @staticmethod
    @overrides
    def get_supported_confinement() -> Tuple[str, ...]:
        return "strict", "devmode"
    @staticmethod
    @overrides
    def METHOD_NAME(base: Optional[str]) -> bool:
        # The extension is stable for every supported base.
        return False
    @overrides
    def get_app_snippet(self) -> Dict[str, Any]:
        """Snippet merged into each app entry: launch chain + desktop plugs."""
        return {
            "command-chain": ["snap/command-chain/desktop-launch"],
            "plugs": ["desktop", "desktop-legacy", "opengl", "wayland", "x11"],
        }
    @functools.cached_property
    def kde_snaps(self) -> KDESnaps:
        """Return the KDE related snaps to use to construct the environment."""
        base = self.yaml_data["base"]
        sdk_snap = _SDK_SNAP[base]
        build_snaps: List[str] = []
        for part in self.yaml_data["parts"].values():
            build_snaps.extend(part.get("build-snaps", []))
        # A part may override the default SDK by listing a matching
        # kf5-…-qt-…-<base>-sdk build-snap.
        matcher = re.compile(r"kf5-\d+-\d+-qt-\d+.*-" + base + r"-sdk.*")
        sdk_snap_candidates = [s for s in build_snaps if matcher.match(s)]
        if sdk_snap_candidates:
            # Drop any channel suffix ("snap/track" -> "snap").
            sdk_snap = sdk_snap_candidates[0].split("/")[0]
            builtin = False
        else:
            builtin = True
        # The content snap name is the sdk snap name without the trailing "-sdk".
        content = sdk_snap[:-4]
        return KDESnaps(sdk=sdk_snap, content=content, builtin=builtin)
    @functools.cached_property
    def ext_info(self) -> ExtensionInfo:
        """Return the extension info cmake_args, provider, content, build_snaps."""
        cmake_args = "-DCMAKE_FIND_ROOT_PATH=/snap/" + self.kde_snaps.sdk + "/current"
        return ExtensionInfo(cmake_args=cmake_args)
    @overrides
    def get_root_snippet(self) -> Dict[str, Any]:
        """Snippet merged into the snap's root: plugs, hooks, layout, env."""
        platform_snap = self.kde_snaps.content
        content_snap = self.kde_snaps.content + "-all"
        return {
            "assumes": ["snapd2.43"],  # for 'snapctl is-connected'
            "compression": "lzo",
            "plugs": {
                "desktop": {"mount-host-font-cache": False},
                "icon-themes": {
                    "interface": "content",
                    "target": "$SNAP/data-dir/icons",
                    "default-provider": "gtk-common-themes",
                },
                "sound-themes": {
                    "interface": "content",
                    "target": "$SNAP/data-dir/sounds",
                    "default-provider": "gtk-common-themes",
                },
                # The KDE runtime content snap is mounted at $SNAP/kf5.
                platform_snap: {
                    "content": content_snap,
                    "interface": "content",
                    "default-provider": platform_snap,
                    "target": "$SNAP/kf5",
                },
            },
            "environment": {"SNAP_DESKTOP_RUNTIME": "$SNAP/kf5"},
            "hooks": {
                "configure": {
                    "plugs": ["desktop"],
                    "command-chain": ["snap/command-chain/hooks-configure-desktop"],
                }
            },
            "layout": {"/usr/share/X11": {"symlink": "$SNAP/kf5/usr/share/X11"}},
        }
    @overrides
    def get_part_snippet(self, *, plugin_name: str) -> Dict[str, Any]:
        """Build environment merged into every part: SDK tools on PATH,
        SDK data dirs, and CMake pointed at the SDK sysroot."""
        sdk_snap = self.kde_snaps.sdk
        cmake_args = self.ext_info.cmake_args
        return {
            "build-environment": [
                {
                    "PATH": prepend_to_env(
                        "PATH", [f"/snap/{sdk_snap}/current/usr/bin"]
                    ),
                },
                {
                    "XDG_DATA_DIRS": prepend_to_env(
                        "XDG_DATA_DIRS",
                        [
                            f"$CRAFT_STAGE/usr/share:/snap/{sdk_snap}/current/usr/share",
                            "/usr/share",
                        ],
                    ),
                },
                {
                    "SNAPCRAFT_CMAKE_ARGS": prepend_to_env(
                        "SNAPCRAFT_CMAKE_ARGS",
                        [
                            cmake_args,
                        ],
                    ),
                },
            ],
        }
    @overrides
    def get_parts_snippet(self) -> Dict[str, Any]:
        """Add the helper part that builds the desktop-launch command chain.

        The SDK snap is added as a build-snap only when the built-in default
        is in use; otherwise the user's part already declares it.
        """
        # We can change this to the lightweight command-chain when
        # the content snap includes the desktop-launch from
        # https://github.com/snapcore/snapcraft-desktop-integration
        source = get_extensions_data_dir() / "desktop" / "kde-neon"
        if self.kde_snaps.builtin:
            return {
                "kde-neon/sdk": {
                    "source": str(source),
                    "plugin": "make",
                    "make-parameters": [f"PLATFORM_PLUG={self.kde_snaps.content}"],
                    "build-snaps": [self.kde_snaps.sdk],
                },
            }
        return {
            "kde-neon/sdk": {
                "source": str(source),
                "plugin": "make",
                "make-parameters": [f"PLATFORM_PLUG={self.kde_snaps.content}"],
            },
        }
298,667 | test regenerate only if necessary | # encoding: utf-8
u"""
Tests for ``ckan.lib.i18n``.
"""
import codecs
import json
import os.path
import shutil
import tempfile
import pytest
from ckan.lib import i18n
from ckan import plugins
from ckan.lib.plugins import DefaultTranslation
HERE = os.path.abspath(os.path.dirname(__file__))
I18N_DIR = os.path.join(HERE, u"_i18n_build_js_translations")
I18N_DUMMY_DIR = os.path.join(HERE, u"_i18n_dummy_es")
class JSTranslationsTestPlugin(plugins.SingletonPlugin, DefaultTranslation):
    u"""
    CKAN plugin for testing JavaScript translations from extensions.
    Registered in ``setup.py`` as ``test_js_translations_plugin``.
    """
    plugins.implements(plugins.ITranslation)
    def i18n_directory(self):
        # Serve translation catalogs from the test fixture directory.
        return I18N_DIR
    def i18n_domain(self):
        # Gettext domain under which the fixture catalogs are registered.
        return u"ckanext-test_js_translations"
@pytest.mark.ckan_config(u"ckan.plugins", u"test_js_translations_plugin")
@pytest.mark.usefixtures(u"with_plugins")
class TestBuildJSTranslations(object):
    u"""
    Tests for ``ckan.lib.i18n.build_js_translations``.
    """
    def setup(self):
        # Fresh output directory for each test (nose-style setup/teardown).
        self.temp_dir = tempfile.mkdtemp()
    def teardown(self):
        shutil.rmtree(self.temp_dir, ignore_errors=True)
    def build_js_translations(self):
        u"""
        Build JS translations in temporary directory.
        """
        # Temporarily redirect the module-level output dir to our temp dir,
        # restoring it even if the build raises.
        old_translations_dir = i18n._JS_TRANSLATIONS_DIR
        i18n._JS_TRANSLATIONS_DIR = self.temp_dir
        try:
            return i18n.build_js_translations()
        finally:
            i18n._JS_TRANSLATIONS_DIR = old_translations_dir
    def test_output_is_valid(self):
        u"""
        Test that the generated JS files are valid.
        """
        def check_file(path):
            # Each output file is a JSON catalog whose header entry ("")
            # carries the gettext domain.
            with codecs.open(path, u"r", encoding=u"utf-8") as f:
                data = json.load(f)
            assert data[u""].get(u"domain", None) == u"ckan"
        self.build_js_translations()
        files = os.listdir(self.temp_dir)
        # Check that all locales have been generated
        assert set(i18n.get_locales()).difference([u"en"]) == set(
            os.path.splitext(fn)[0] for fn in files
        )
        # Check that each file is valid
        for filename in files:
            check_file(os.path.join(self.temp_dir, filename))
    def METHOD_NAME(self):
        u"""
        Test that translation files are only generated when necessary.
        """
        self.build_js_translations()
        mtimes = {}
        for filename in os.listdir(self.temp_dir):
            fullname = os.path.join(self.temp_dir, filename)
            mtimes[filename] = os.path.getmtime(fullname)
        # Remove an output file and back-date another one
        removed_filename, outdated_filename = sorted(mtimes.keys())[:2]
        mtimes.pop(removed_filename)
        outdated_mtime = mtimes.pop(outdated_filename)
        os.remove(os.path.join(self.temp_dir, removed_filename))
        os.utime(os.path.join(self.temp_dir, outdated_filename), (0, 0))
        self.build_js_translations()
        # Make sure deleted file has been rebuild
        assert os.path.isfile(os.path.join(self.temp_dir, removed_filename))
        # Make sure outdated file has been rebuild
        fullname = os.path.join(self.temp_dir, outdated_filename)
        assert os.path.getmtime(fullname) >= outdated_mtime
        # Make sure the other files have not been rebuild
        for filename in os.listdir(self.temp_dir):
            if filename in [removed_filename, outdated_filename]:
                continue
            fullname = os.path.join(self.temp_dir, filename)
            new_mtime = os.path.getmtime(fullname)
            assert new_mtime == mtimes[filename]
    def test_translations_from_extensions(self):
        u"""
        Test that translations from extensions are taken into account.
        """
        self.build_js_translations()
        filename = os.path.join(self.temp_dir, u"de.js")
        with codecs.open(filename, u"r", encoding=u"utf-8") as f:
            de = json.load(f)
        # Check overriding a JS translation from CKAN core
        assert u"Loading..." in de
        assert de[u"Loading..."] == [None, u"foo"]
        # Check introducing a new JS translation
        assert u"Test JS Translations 1" in de
        assert de[u"Test JS Translations 1"] == [None, u"bar"]
        # Check that non-JS strings are not exported
        assert u"Test JS Translations 2" not in de
@pytest.mark.ckan_config("ckan.plugins", "test_blueprint_plugin")
@pytest.mark.usefixtures("with_plugins")
class TestI18nFlask(object):
    """Request-level translation tests against the Flask test app."""
    def test_translation_works(self, app):
        # The /es/ URL prefix switches the request locale.
        resp = app.get("/view_translated")
        assert resp.data == b"Dataset"
        resp = app.get("/es/view_translated")
        assert resp.data == b"Conjunto de datos"
    @pytest.mark.ckan_config("ckan.i18n_directory", I18N_DUMMY_DIR)
    def test_config_i18n_directory(self, app):
        # A custom ckan.i18n_directory overrides the built-in Spanish catalog.
        resp = app.get("/view_translated")
        assert resp.data == b"Dataset"
        resp = app.get("/es/view_translated")
        assert resp.data == b"Foo baz 123"
298,668 | get filter conditions | # Copyright (c) 2023, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, data) for the given filters."""
    rows = get_data(filters)
    columns = get_columns(filters, rows)
    return columns, rows
def get_data(filters):
    """Fetch bundle headers joined with their serial/batch entry rows,
    ordered by posting date.

    Requesting child-table (``Serial and Batch Entry``) fields alongside
    parent fields makes frappe join the child doctype automatically.
    """
    filter_conditions = METHOD_NAME(filters)
    return frappe.get_all(
        "Serial and Batch Bundle",
        fields=[
            "`tabSerial and Batch Bundle`.`voucher_type`",
            "`tabSerial and Batch Bundle`.`posting_date`",
            "`tabSerial and Batch Bundle`.`name`",
            "`tabSerial and Batch Bundle`.`company`",
            "`tabSerial and Batch Bundle`.`voucher_no`",
            "`tabSerial and Batch Bundle`.`item_code`",
            "`tabSerial and Batch Bundle`.`item_name`",
            "`tabSerial and Batch Entry`.`serial_no`",
            "`tabSerial and Batch Entry`.`batch_no`",
            "`tabSerial and Batch Entry`.`warehouse`",
            "`tabSerial and Batch Entry`.`incoming_rate`",
            "`tabSerial and Batch Entry`.`stock_value_difference`",
            "`tabSerial and Batch Entry`.`qty`",
        ],
        filters=filter_conditions,
        order_by="posting_date",
    )
def METHOD_NAME(filters):
    """Translate the report's filter dict into frappe query filter triples."""
    # Only submitted, non-cancelled bundles are ever reported on.
    conditions = [
        ["Serial and Batch Bundle", "docstatus", "=", 1],
        ["Serial and Batch Bundle", "is_cancelled", "=", 0],
    ]
    # Bundle-level filters; voucher_no arrives as a list, so it uses "in".
    for field in ["voucher_type", "voucher_no", "item_code", "warehouse", "company"]:
        value = filters.get(field)
        if not value:
            continue
        operator = "in" if field == "voucher_no" else "="
        conditions.append(["Serial and Batch Bundle", field, operator, value])
    # Date range applies only when both bounds are provided.
    if filters.get("from_date") and filters.get("to_date"):
        conditions.append(
            [
                "Serial and Batch Bundle",
                "posting_date",
                "between",
                [filters.get("from_date"), filters.get("to_date")],
            ]
        )
    # Entry-level filters match individual serial / batch child rows.
    for field in ["serial_no", "batch_no"]:
        if filters.get(field):
            conditions.append(["Serial and Batch Entry", field, "=", filters.get(field)])
    return conditions
def get_columns(filters, data):
    """Build the report column layout.

    Columns fixed by an active filter are omitted, and serial/batch columns
    are shown only when the single item in scope actually tracks serial or
    batch numbers.
    """
    columns = [
        {
            "label": _("Company"),
            "fieldname": "company",
            "fieldtype": "Link",
            "options": "Company",
            "width": 120,
        },
        {
            "label": _("Serial and Batch Bundle"),
            "fieldname": "name",
            "fieldtype": "Link",
            "options": "Serial and Batch Bundle",
            "width": 110,
        },
        {"label": _("Posting Date"), "fieldname": "posting_date", "fieldtype": "Date", "width": 100},
    ]
    # Resolve serial/batch tracking flags when exactly one item is in play,
    # either via the item_code filter or because all rows share one item.
    item_details = {}
    item_codes = []
    if filters.get("voucher_type"):
        item_codes = [d.item_code for d in data]
    if filters.get("item_code") or (item_codes and len(list(set(item_codes))) == 1):
        item_details = frappe.get_cached_value(
            "Item",
            filters.get("item_code") or item_codes[0],
            ["has_serial_no", "has_batch_no"],
            as_dict=True,
        )
    if not filters.get("voucher_no"):
        columns.extend(
            [
                {
                    "label": _("Voucher Type"),
                    "fieldname": "voucher_type",
                    "fieldtype": "Link",
                    "options": "DocType",
                    "width": 120,
                },
                {
                    "label": _("Voucher No"),
                    "fieldname": "voucher_no",
                    "fieldtype": "Dynamic Link",
                    "options": "voucher_type",
                    "width": 160,
                },
            ]
        )
    if not filters.get("item_code"):
        columns.extend(
            [
                {
                    "label": _("Item Code"),
                    "fieldname": "item_code",
                    "fieldtype": "Link",
                    "options": "Item",
                    "width": 120,
                },
                {"label": _("Item Name"), "fieldname": "item_name", "fieldtype": "Data", "width": 120},
            ]
        )
    if not filters.get("warehouse"):
        columns.append(
            {
                "label": _("Warehouse"),
                "fieldname": "warehouse",
                "fieldtype": "Link",
                "options": "Warehouse",
                "width": 120,
            }
        )
    # When item tracking is unknown (multiple items), show both sets.
    if not item_details or item_details.get("has_serial_no"):
        columns.append(
            {"label": _("Serial No"), "fieldname": "serial_no", "fieldtype": "Data", "width": 120}
        )
    if not item_details or item_details.get("has_batch_no"):
        columns.extend(
            [
                {"label": _("Batch No"), "fieldname": "batch_no", "fieldtype": "Data", "width": 120},
                {"label": _("Batch Qty"), "fieldname": "qty", "fieldtype": "Float", "width": 120},
            ]
        )
    columns.extend(
        [
            {"label": _("Incoming Rate"), "fieldname": "incoming_rate", "fieldtype": "Float", "width": 120},
            {
                "label": _("Change in Stock Value"),
                "fieldname": "stock_value_difference",
                "fieldtype": "Float",
                "width": 120,
            },
        ]
    )
    return columns
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_voucher_type(doctype, txt, searchfield, start, page_len, filters):
    """Link-query: doctypes that embed a Serial and Batch Bundle field."""
    parents = frappe.get_all(
        "DocField",
        filters={"fieldname": "serial_and_batch_bundle"},
        fields=["distinct parent as parent"],
    )
    query_filters = {"options": ["in", [row.parent for row in parents]]}
    if txt:
        query_filters["parent"] = ["like", "%{}%".format(txt)]
    return frappe.get_all("DocField", filters=query_filters, fields=["distinct parent"], as_list=True)
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_serial_nos(doctype, txt, searchfield, start, page_len, filters):
    """Link-query: serial numbers, restricted to a voucher's bundle if given."""
    query_filters = {}
    if txt:
        query_filters["serial_no"] = ["like", f"%{txt}%"]
    if not filters.get("voucher_no"):
        # No voucher context: search the item's serial numbers directly.
        query_filters["item_code"] = filters.get("item_code")
        return frappe.get_all("Serial No", filters=query_filters, as_list=True)
    # Restrict to entries of the voucher's submitted, non-cancelled bundle.
    serial_batch_bundle = frappe.get_cached_value(
        "Serial and Batch Bundle",
        {"voucher_no": ("in", filters.get("voucher_no")), "docstatus": 1, "is_cancelled": 0},
        "name",
    )
    query_filters["parent"] = serial_batch_bundle
    if not txt:
        # Without a search term, only rows that actually carry a serial no.
        query_filters["serial_no"] = ("is", "set")
    return frappe.get_all(
        "Serial and Batch Entry", filters=query_filters, fields=["serial_no"], as_list=True
    )
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_batch_nos(doctype, txt, searchfield, start, page_len, filters):
    """Link-query: batch numbers, restricted to a voucher's bundle if given."""
    query_filters = {}
    if txt:
        query_filters["batch_no"] = ["like", f"%{txt}%"]
    if not filters.get("voucher_no"):
        # No voucher context: search the item's batches directly.
        query_filters["item"] = filters.get("item_code")
        return frappe.get_all("Batch", filters=query_filters, as_list=True)
    # Restrict to entries of the voucher's submitted, non-cancelled bundle.
    serial_batch_bundle = frappe.get_cached_value(
        "Serial and Batch Bundle",
        {"voucher_no": ("in", filters.get("voucher_no")), "docstatus": 1, "is_cancelled": 0},
        "name",
    )
    query_filters["parent"] = serial_batch_bundle
    if not txt:
        # Without a search term, only rows that actually carry a batch no.
        query_filters["batch_no"] = ("is", "set")
    return frappe.get_all(
        "Serial and Batch Entry", filters=query_filters, fields=["batch_no"], as_list=True
    )
298,669 | create extension | import logging
from qtpy.QtDesigner import QExtensionFactory, QPyDesignerTaskMenuExtension
from qtpy import QtWidgets
from ..utilities import copy_to_clipboard, get_clipboard_text
from ..widgets.base import PyDMPrimitiveWidget
from ..widgets.rules_editor import RulesEditor
from ..widgets.designer_settings import BasicSettingsEditor
from ..widgets.archiver_time_plot_editor import ArchiverTimePlotCurveEditorDialog
from ..widgets.waveformplot_curve_editor import WaveformPlotCurveEditorDialog
from ..widgets.timeplot_curve_editor import TimePlotCurveEditorDialog
from ..widgets.scatterplot_curve_editor import ScatterPlotCurveEditorDialog
from ..widgets.eventplot_curve_editor import EventPlotCurveEditorDialog
from ..widgets.symbol_editor import SymbolEditor
logger = logging.getLogger(__name__)
class PyDMExtensionFactory(QExtensionFactory):
    """Designer extension factory serving task-menu extensions for PyDM
    widgets."""
    def __init__(self, parent=None):
        super(PyDMExtensionFactory, self).__init__(parent)
    def METHOD_NAME(self, obj, iid, parent):
        # Only PyDM widgets get custom designer extensions.
        if not isinstance(obj, PyDMPrimitiveWidget):
            return None
        # For now check the iid for TaskMenu...
        if iid != "org.qt-project.Qt.Designer.TaskMenu":
            # In the future we can expand to the others such as Property etc.
            # When the time comes we will need a new PyDMExtension plus the
            # equivalent of PyDMTaskMenuExtension for the property editor,
            # and a branch here to instantiate it...
            return None
        return PyDMTaskMenuExtension(obj, parent)
class PyDMTaskMenuExtension(QPyDesignerTaskMenuExtension):
    """Aggregates the task-menu actions contributed by a widget's declared
    designer extensions."""
    def __init__(self, widget, parent):
        super(PyDMTaskMenuExtension, self).__init__(parent)
        self.widget = widget
        self.__actions = None
        self.__extensions = []
        # Widgets opt in by exposing an `extensions` attribute listing
        # extension classes; instantiate each one against this widget.
        extensions = getattr(widget, 'extensions', None)
        if extensions is not None:
            for ex in extensions:
                extension = ex(self.widget)
                self.__extensions.append(extension)
    def taskActions(self):
        # Lazily collect actions from all extensions on first request.
        if self.__actions is None:
            self.__actions = []
            for ex in self.__extensions:
                self.__actions.extend(ex.actions())
        return self.__actions
    def preferredEditAction(self):
        if self.__actions is None:
            self.taskActions()
        # NOTE(review): implicitly returns None when no extension contributed
        # any action — presumably Qt tolerates that; confirm.
        if self.__actions:
            return self.__actions[0]
class PyDMExtension(object):
    """Base class for PyDM designer extensions bound to a single widget."""
    def __init__(self, widget):
        self.widget = widget
    def actions(self):
        """Return the QActions this extension contributes to the task menu."""
        raise NotImplementedError
class BasicSettingsExtension(PyDMExtension):
    """Task-menu extension exposing the basic settings dialog and, for
    channel-bearing widgets, a PyDM Channel submenu (copy/paste/edit)."""
    def __init__(self, widget):
        super(BasicSettingsExtension, self).__init__(widget)
        self.widget = widget
        self.edit_settings_action = QtWidgets.QAction(
            "Py&DM basic settings...", self.widget
        )
        self.edit_settings_action.triggered.connect(self.open_dialog)
        # The channel submenu only applies to widgets that have a channel.
        if not hasattr(widget, "channel"):
            self.channel_menu_action = None
        else:
            self.channel_menu_action = QtWidgets.QAction(
                "PyDM C&hannel", self.widget
            )
            # self.channel_menu_action.triggered.connect(self.open_channel_menu)
            self.channel_menu = QtWidgets.QMenu()
            # Copy/paste action labels are filled in just before the menu
            # shows, so they reflect the current channel and clipboard.
            self.copy_channel_action = self.channel_menu.addAction("")
            self.copy_channel_action.triggered.connect(self.copy_channel)
            self.paste_channel_action = self.channel_menu.addAction("")
            self.paste_channel_action.triggered.connect(self.paste_channel)
            self.channel_menu.aboutToShow.connect(self.update_action_clipboard_text)
            edit_channel = self.channel_menu.addAction(
                "&Edit channel..."
            )
            edit_channel.triggered.connect(self.open_dialog)
            copy_channel_value = self.channel_menu.addAction(
                "C&opy current value"
            )
            copy_channel_value.triggered.connect(self.copy_channel_value)
            self.channel_menu_action.setMenu(self.channel_menu)
    def update_action_clipboard_text(self):
        """Refresh copy/paste labels with the live channel/clipboard text."""
        self.copy_channel_action.setText(
            f"&Copy to clipboard: {self.widget.channel}"
        )
        clipboard_text = get_clipboard_text() or ""
        self.paste_channel_action.setText(
            f"&Paste from clipboard: {clipboard_text[:100]}"
        )
    def copy_channel(self, _):
        """Copy the widget's channel address to the clipboard."""
        channel = self.widget.channel
        if channel:
            copy_to_clipboard(channel)
    def copy_channel_value(self, _):
        """Copy the widget's current value to the clipboard, if any."""
        value = getattr(self.widget, "value", None)
        if value:
            copy_to_clipboard(value)
    def paste_channel(self, _):
        """Set the widget's channel from the clipboard contents."""
        self.widget.channel = get_clipboard_text() or ""
        logger.info("Set widget channel to %r", self.widget.channel)
    def open_dialog(self, state):
        """Open the modal basic-settings editor for the widget."""
        dialog = BasicSettingsEditor(self.widget, parent=None)
        dialog.exec_()
    def actions(self):
        actions = [
            self.edit_settings_action,
            self.channel_menu_action,
        ]
        # channel_menu_action is None for widgets without a channel.
        return [action for action in actions if action is not None]
class RulesExtension(PyDMExtension):
    """Task-menu extension exposing the PyDM rules editor."""
    def __init__(self, widget):
        super(RulesExtension, self).__init__(widget)
        self.widget = widget
        self.edit_rules_action = QtWidgets.QAction("Edit Rules...", self.widget)
        self.edit_rules_action.triggered.connect(self.edit_rules)
    def edit_rules(self, state):
        """Open the modal rules editor for the widget."""
        RulesEditor(self.widget, parent=None).exec_()
    def actions(self):
        return [self.edit_rules_action]
class SymbolExtension(PyDMExtension):
    """Task-menu extension exposing the PyDM symbol editor."""
    def __init__(self, widget):
        super(SymbolExtension, self).__init__(widget)
        self.widget = widget
        self.edit_symbols_action = QtWidgets.QAction("Edit Symbols...", self.widget)
        self.edit_symbols_action.triggered.connect(self.edit_symbols)
    def edit_symbols(self, state):
        """Open the modal symbol editor for the widget."""
        SymbolEditor(self.widget, parent=None).exec_()
    def actions(self):
        return [self.edit_symbols_action]
class BasePlotExtension(PyDMExtension):
    """Shared task-menu extension for plot widgets: contributes an
    'Edit Curves...' action that opens the given curve-editor dialog."""
    def __init__(self, widget, curve_editor_class):
        super(BasePlotExtension, self).__init__(widget)
        self.widget = widget
        self.curve_editor_class = curve_editor_class
        self.edit_curves_action = QtWidgets.QAction("Edit Curves...", self.widget)
        self.edit_curves_action.triggered.connect(self.edit_curves)
    def edit_curves(self, state):
        """Open the curve-editor dialog modally, parented to the widget."""
        self.curve_editor_class(self.widget, parent=self.widget).exec_()
    def actions(self):
        return [self.edit_curves_action]
class WaveformCurveEditorExtension(BasePlotExtension):
    """Curve-editor task-menu extension for waveform plots."""
    def __init__(self, widget):
        super(WaveformCurveEditorExtension, self).__init__(widget, WaveformPlotCurveEditorDialog)
class ArchiveTimeCurveEditorExtension(BasePlotExtension):
    """Curve-editor task-menu extension for archiver time plots."""
    def __init__(self, widget):
        super(ArchiveTimeCurveEditorExtension, self).__init__(widget, ArchiverTimePlotCurveEditorDialog)
class TimeCurveEditorExtension(BasePlotExtension):
    """Curve-editor task-menu extension for time plots."""
    def __init__(self, widget):
        super(TimeCurveEditorExtension, self).__init__(widget, TimePlotCurveEditorDialog)
class ScatterCurveEditorExtension(BasePlotExtension):
    """Curve-editor task-menu extension for scatter plots."""
    def __init__(self, widget):
        super(ScatterCurveEditorExtension, self).__init__(widget, ScatterPlotCurveEditorDialog)
class EventCurveEditorExtension(BasePlotExtension):
    """Curve-editor task-menu extension for event plots."""
    def __init__(self, widget):
        super(EventCurveEditorExtension, self).__init__(widget, EventPlotCurveEditorDialog)
298,670 | add use | import logging
import os
from enum import Enum
from common import get_cmd_or_die, NonZeroReturn
from plumbum.machines.local import LocalCommand
from typing import Iterable, List, Optional, Set, Tuple
# Handle to the system rustc binary; get_cmd_or_die aborts at import time
# if rustc is not on PATH.
rustc = get_cmd_or_die("rustc")
# TODO: Support for custom visibility paths, if needed
class RustVisibility(Enum):
    """Rust item visibility, as the exact source prefix emitted before it."""
    Private = ""
    Public = "pub "
    Crate = "pub(crate) "
class CrateType(Enum):
    """Crate kind, as the value passed to rustc's --crate-type flag."""
    Binary = "bin"
    Library = "lib"
class RustFile:
    """A Rust source file on disk that can be compiled with rustc."""
    def __init__(self, path: str) -> None:
        self.path = path
    def compile(self, crate_type: CrateType, save_output: bool = False,
                extra_args: Optional[List[str]] = None) -> Optional[LocalCommand]:
        """Compile this file with rustc.

        :param crate_type: build a binary or a library.
        :param save_output: write the artifact next to the source and, for
            binaries, return a runnable command handle.
        :param extra_args: additional rustc arguments (never mutated).
        :return: a command for the produced binary when ``save_output`` is
            True and ``crate_type`` is Binary, otherwise None.
        :raises NonZeroReturn: if rustc exits with a non-zero status.
        """
        # Fix: the default used to be a shared mutable list ([]) — the
        # classic mutable-default-argument pitfall. None keeps the call
        # signature backward compatible.
        if extra_args is None:
            extra_args = []
        current_dir, _ = os.path.split(self.path)
        extensionless_file, _ = os.path.splitext(self.path)
        # run rustc, letting it find sibling crates in the source directory
        args = [
            "--crate-type={}".format(crate_type.value),
            "-L",
            current_dir
        ] + extra_args
        if save_output:
            args.append('-o')
            if crate_type == CrateType.Binary:
                args.append(extensionless_file)
            else:
                # REVIEW: Not sure if ext is correct
                args.append(extensionless_file + ".lib")
        args.append(self.path)
        # log the command in a format that's easy to re-run
        logging.debug("rustc compile command: %s", str(rustc[args]))
        # retcode=None: don't raise inside plumbum; inspect the status here
        retcode, stdout, stderr = rustc[args].run(retcode=None)
        logging.debug("stdout:\n%s", stdout)
        if retcode != 0:
            raise NonZeroReturn(stderr)
        if save_output:
            if crate_type == CrateType.Binary:
                return get_cmd_or_die(extensionless_file)
            # TODO: Support saving lib file
        return None
class RustMod:
    """A ``mod`` declaration; hashable so builders can deduplicate them."""
    def __init__(self, name: str, visibility: Optional[RustVisibility] = None) -> None:
        self.name = name
        self.visibility = visibility or RustVisibility.Private
    def __str__(self) -> str:
        return f"{self.visibility.value}mod {self.name};\n"
    def __hash__(self) -> int:
        return hash((self.visibility, self.name))
    def __eq__(self, other: object) -> bool:
        return (
            isinstance(other, RustMod)
            and self.name == other.name
            and self.visibility == other.visibility
        )
class RustUse:
    """A ``use`` declaration built from path segments; hashable for sets."""
    def __init__(self, use: List[str], visibility: Optional[RustVisibility] = None):
        # Join the path segments into a single "a::b::c" path.
        self.use = "::".join(use)
        self.visibility = visibility or RustVisibility.Private
    def __str__(self) -> str:
        return f"{self.visibility.value}use {self.use};\n"
    def __hash__(self) -> int:
        return hash((self.use, self.visibility))
    def __eq__(self, other: object) -> bool:
        return (
            isinstance(other, RustUse)
            and self.use == other.use
            and self.visibility == other.visibility
        )
# TODO: Support params, lifetimes, generics, etc if needed
class RustFunction:
    """A zero-argument Rust function rendered from a list of body lines."""
    def __init__(self, name: str, visibility: Optional[RustVisibility] = None,
                 body: Optional[List[str]] = None) -> None:
        self.name = name
        self.visibility = visibility or RustVisibility.Private
        self.body = body or []
    def __str__(self) -> str:
        pieces = ["{}fn {}() {{\n".format(self.visibility.value, self.name)]
        pieces.extend(" " + str(stmt) for stmt in self.body)
        pieces.append("}\n")
        return "".join(pieces)
class RustMatch:
    """A Rust ``match`` expression over a value with literal pattern arms."""
    def __init__(self, value: str, arms: List[Tuple[str, str]]) -> None:
        self.value = value
        self.arms = arms
    def __str__(self) -> str:
        pieces = ["match {} {{\n".format(self.value)]
        pieces.extend(" {} => {},\n".format(pat, expr) for pat, expr in self.arms)
        pieces.append(" }\n")
        return "".join(pieces)
class RustFileBuilder:
    """Accumulates features, pragmas, crates, mods, uses and functions,
    then renders them into Rust source and writes a RustFile."""
    def __init__(self) -> None:
        # Sets deduplicate declarations; functions keep insertion order.
        self.features: Set[str] = set()
        self.pragmas: List[Tuple[str, Iterable[str]]] = []
        self.extern_crates: Set[str] = set()
        self.mods: Set[RustMod] = set()
        self.uses: Set[RustUse] = set()
        self.functions: List[RustFunction] = []
    def __str__(self) -> str:
        # Render in the order rustc expects: crate attributes first, then
        # extern crates, mods, uses, and finally function bodies.
        buffer = ""
        for feature in self.features:
            buffer += "#![feature({})]\n".format(feature)
        buffer += '\n'
        for pragma in self.pragmas:
            buffer += "#![{}({})]\n".format(pragma[0], ",".join(pragma[1]))
        buffer += '\n'
        for crate in self.extern_crates:
            # TODO(kkysen) `#[macro_use]` shouldn't be needed.
            # Waiting on fix for https://github.com/immunant/c2rust/issues/426.
            buffer += "#[macro_use] extern crate {};\n".format(crate)
        buffer += '\n'
        for mod in self.mods:
            buffer += str(mod)
        buffer += '\n'
        for use in self.uses:
            buffer += str(use)
        buffer += '\n'
        for function in self.functions:
            buffer += str(function)
        buffer += '\n'
        return buffer
    def add_feature(self, feature: str) -> None:
        self.features.add(feature)
    def add_features(self, features: Iterable[str]) -> None:
        self.features.update(features)
    def add_pragma(self, name: str, value: Iterable[str]) -> None:
        self.pragmas.append((name, value))
    def add_extern_crate(self, crate: str) -> None:
        self.extern_crates.add(crate)
    def add_extern_crates(self, crates: Iterable[str]) -> None:
        self.extern_crates.update(crates)
    def add_mod(self, mod: RustMod) -> None:
        self.mods.add(mod)
    def add_mods(self, mods: Iterable[RustMod]) -> None:
        self.mods.update(mods)
    def METHOD_NAME(self, use: RustUse) -> None:
        self.uses.add(use)
    def add_uses(self, uses: Iterable[RustUse]) -> None:
        self.uses.update(uses)
    def add_function(self, function: RustFunction) -> None:
        self.functions.append(function)
    def add_functions(self, functions: Iterable[RustFunction]) -> None:
        self.functions.extend(functions)
    def build(self, path: str) -> RustFile:
        """Write the rendered source to *path* and return it as a RustFile."""
        with open(path, 'w') as fh:
            fh.write(str(self))
        return RustFile(path)
298,671 | test start | import unittest
from unittest.mock import MagicMock, patch
from pathlib import Path
from click.testing import CliRunner
from vantage6.cli.globals import APPNAME
from vantage6.cli.server import (
cli_server_start,
cli_server_configuration_list,
cli_server_files,
cli_server_import,
cli_server_new,
cli_server_stop,
cli_server_attach
)
class ServerCLITest(unittest.TestCase):
    """Tests for the vantage6 server CLI commands.

    All external effects (docker, filesystem, server context) are patched
    out. Remember that stacked ``mock.patch`` decorators are applied
    bottom-up, so the mock arguments arrive in reverse decorator order.
    """

    @patch("vantage6.cli.server.NetworkManager")
    @patch("vantage6.cli.server.docker.types.Mount")
    @patch("os.makedirs")
    @patch("vantage6.cli.server.pull_if_newer")
    @patch("vantage6.cli.server.ServerContext")
    @patch("vantage6.cli.server.docker.from_env")
    @patch("vantage6.cli.server.check_docker_running", return_value=True)
    def METHOD_NAME(self, docker_check, containers, context,
                    pull, os_makedirs, mount, network_manager):
        """Start server without errors"""
        # One pre-existing container whose name differs from the context name
        # below, so the start command does not see it as already running.
        container1 = MagicMock()
        container1.containers.name = f"{APPNAME}-iknl-system"
        containers.containers.list.return_value = [container1]
        containers.containers.run.return_value = True
        # mount.types.Mount.return_value = MagicMock()
        ctx = MagicMock(
            config={
                'uri': 'sqlite:///file.db',
                'port': 9999
            },
            config_file="/config.yaml",
            data_dir=Path(".")
        )
        ctx.config_exists.return_value = True
        # `name` must be set after construction: MagicMock(name=...) would
        # configure the mock object's own name instead of an attribute.
        ctx.name = 'not-running'
        context.return_value = ctx
        runner = CliRunner()
        result = runner.invoke(cli_server_start, ["--name", "not-running"])
        self.assertEqual(result.exit_code, 0)

    @patch("vantage6.cli.server.ServerContext")
    @patch("docker.DockerClient.containers")
    @patch("vantage6.cli.server.check_docker_running", return_value=True)
    def test_configuration_list(self, docker_check, containers, context):
        """Configuration list without errors."""
        container1 = MagicMock()
        container1.name = f"{APPNAME}-iknl-system"
        containers.list.return_value = [container1]
        config = MagicMock(available_environments=["application"])
        # Same MagicMock(name=...) caveat as above: assign name afterwards.
        config.name = "iknl"
        context.available_configurations.return_value = ([config], [])
        runner = CliRunner()
        result = runner.invoke(cli_server_configuration_list)
        self.assertEqual(result.exit_code, 0)
        self.assertIsNone(result.exception)

    @patch("vantage6.cli.server.ServerContext")
    def test_files(self, context):
        """Configuration files without errors."""
        ctx = context.return_value = MagicMock(
            log_file="/log_file.log",
            config_file="/iknl.yaml"
        )
        ctx.get_database_uri.return_value = "sqlite:///test.db"
        runner = CliRunner()
        result = runner.invoke(cli_server_files, ["--name", "iknl"])
        self.assertIsNone(result.exception)
        self.assertEqual(result.exit_code, 0)

    @patch("docker.DockerClient.containers")
    @patch("vantage6.cli.server._print_log_worker")
    @patch("vantage6.cli.server.click.Path")
    @patch("vantage6.cli.server.check_docker_running", return_value=True)
    @patch("vantage6.cli.server.ServerContext")
    def test_import(self, context, docker_check, click_path, log, containers):
        """Import entities without errors."""
        click_path.return_value = MagicMock()
        ctx = MagicMock()
        ctx.name = 'some-name'
        context.return_value = ctx
        runner = CliRunner()
        # The import command validates its file argument, so a real (temp)
        # file must exist inside the isolated filesystem.
        with runner.isolated_filesystem():
            with open("some.yaml", "w") as fp:
                fp.write("does-not-matter")
            result = runner.invoke(cli_server_import, [
                "--name", "iknl", "some.yaml"
            ])
            self.assertIsNone(result.exception)
            self.assertEqual(result.exit_code, 0)

    @patch("vantage6.cli.server.configuration_wizard")
    @patch("vantage6.cli.server.check_config_writeable")
    @patch("vantage6.cli.server.ServerContext")
    def test_new(self, context, permissions, wizard):
        """New configuration without errors."""
        # config_exists is stubbed on the (mocked) class itself, since the
        # CLI queries it before any context instance is created.
        context.config_exists.return_value = False
        permissions.return_value = True
        wizard.return_value = "/some/file.yaml"
        runner = CliRunner()
        result = runner.invoke(cli_server_new, ["--name", "iknl"])
        self.assertIsNone(result.exception)
        self.assertEqual(result.exit_code, 0)

    @patch("vantage6.cli.server.ServerContext")
    @patch("vantage6.cli.server.docker.from_env")
    @patch("vantage6.cli.server.check_docker_running", return_value=True)
    def test_stop(self, docker_check, containers, context):
        """Stop server without errors."""
        container1 = MagicMock()
        container1.name = f"{APPNAME}-iknl-system-server"
        containers.containers.list.return_value = [container1]
        ctx = MagicMock(
            config={
                'rabbitmq_uri': None
            }
        )
        context.return_value = ctx
        runner = CliRunner()
        result = runner.invoke(cli_server_stop, ["--name", "iknl"])
        self.assertIsNone(result.exception)
        self.assertEqual(result.exit_code, 0)

    @patch("vantage6.cli.server.time.sleep")
    @patch("docker.DockerClient.containers")
    @patch("vantage6.cli.server.check_docker_running", return_value=True)
    def test_attach(self, docker_check, containers, sleep):
        """Attach log to the console without errors."""
        container1 = MagicMock()
        container1.name = f"{APPNAME}-iknl-system-server"
        containers.list.return_value = [container1]
        # Break out of the attach loop by raising from the patched sleep.
        sleep.side_effect = KeyboardInterrupt("Boom!")
        runner = CliRunner()
        result = runner.invoke(cli_server_attach, ["--name", "iknl"])
        self.assertIsNone(result.exception)
        self.assertEqual(result.exit_code, 0)
298,672 | on pa event | from gettext import gettext as _
import logging
from typing import Dict, List, TYPE_CHECKING, Mapping, Sequence
from blueman.bluez.Device import Device
from blueman.plugins.ManagerPlugin import ManagerPlugin
from blueman.main.PulseAudioUtils import PulseAudioUtils, EventType
from blueman.gui.manager.ManagerDeviceMenu import ManagerDeviceMenu, MenuItemsProvider, DeviceMenuItem
from blueman.Functions import create_menuitem
from blueman.Sdp import AUDIO_SOURCE_SVCLASS_ID, AUDIO_SINK_SVCLASS_ID, ServiceUUID
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
if TYPE_CHECKING:
from blueman.main.PulseAudioUtils import CardInfo # noqa: F401
class PulseAudioProfile(ManagerPlugin, MenuItemsProvider):
    """Manager plugin that adds an "Audio Profile" submenu for connected
    audio devices, backed by PulseAudio card profiles."""

    def on_load(self) -> None:
        """Subscribe to PulseAudio events; devices seen before the PA
        connection is ready are parked in ``self.deferred``."""
        self.devices: Dict[str, "CardInfo"] = {}
        self.deferred: List[Device] = []
        pa = PulseAudioUtils()
        pa.connect("event", self.METHOD_NAME)
        pa.connect("connected", self.on_pa_ready)

    def on_pa_ready(self, _utils: PulseAudioUtils) -> None:
        """Flush menu regeneration for devices deferred while PA was down."""
        logging.info("connected")
        for dev in self.deferred:
            self.regenerate_with_device(dev['Address'])
        self.deferred = []

    # updates all menu instances with the following device address
    def regenerate_with_device(self, device_addr: str) -> None:
        for inst in ManagerDeviceMenu.__instances__:
            if inst.SelectedDevice['Address'] == device_addr and not inst.is_popup:
                inst.generate()

    def METHOD_NAME(self, utils: PulseAudioUtils, event: int, idx: int) -> None:
        """Handle a PulseAudio event: track bluetooth cards and refresh menus."""
        logging.debug(f"{event} {idx}")

        def get_card_cb(card: "CardInfo") -> None:
            # Only bluetooth-backed cards are of interest here.
            drivers = ("module-bluetooth-device.c",
                       "module-bluez4-device.c",
                       "module-bluez5-device.c")
            if card["driver"] in drivers:
                self.devices[card["proplist"]["device.string"]] = card
                self.regenerate_with_device(card["proplist"]["device.string"])

        if event & EventType.CARD:
            logging.info("card")
            if event & EventType.CHANGE:
                logging.info("change")
                utils.get_card(idx, get_card_cb)
            elif event & EventType.REMOVE:
                logging.info("remove")
            else:
                logging.info("add")
                utils.get_card(idx, get_card_cb)

    def query_pa(self, device: Device, item: Gtk.MenuItem) -> None:
        """Look up the PA card for *device* by address, then build its menu."""
        def list_cb(cards: Mapping[str, "CardInfo"]) -> None:
            for c in cards.values():
                if c["proplist"]["device.string"] == device['Address']:
                    self.devices[device['Address']] = c
                    self.generate_menu(device, item)
                    return

        pa = PulseAudioUtils()
        pa.list_cards(list_cb)

    def on_selection_changed(self, item: Gtk.CheckMenuItem, device: Device, profile: str) -> None:
        """Apply the chosen card profile when its radio item is activated."""
        if item.get_active():
            pa = PulseAudioUtils()
            c = self.devices[device['Address']]

            def on_result(res: int) -> None:
                if not res:
                    # Translate the template first, then substitute: formatting
                    # before the gettext lookup would query the catalog with an
                    # already-substituted (untranslatable) string.
                    self.parent.infobar_update(_("Failed to change profile to %s") % profile)

            pa.set_card_profile(c["index"], profile, on_result)

    def generate_menu(self, device: Device, item: Gtk.MenuItem) -> None:
        """Populate *item* with one radio entry per available card profile."""
        info = self.devices[device['Address']]
        group: Sequence[Gtk.RadioMenuItem] = []
        sub = Gtk.Menu()
        if info:
            for profile in info["profiles"]:
                i = Gtk.RadioMenuItem.new_with_label(group, profile["description"])
                group = i.get_group()
                if profile["name"] == info["active_profile"]:
                    i.set_active(True)
                i.connect("toggled", self.on_selection_changed,
                          device, profile["name"])
                sub.append(i)
                i.show()
        item.set_submenu(sub)
        item.show()

    def on_request_menu_items(self, manager_menu: ManagerDeviceMenu, device: Device) -> List[DeviceMenuItem]:
        """Offer the submenu only for connected devices advertising an
        audio source/sink service UUID."""
        audio_source = False
        for uuid in device['UUIDs']:
            if ServiceUUID(uuid).short_uuid in (AUDIO_SOURCE_SVCLASS_ID, AUDIO_SINK_SVCLASS_ID):
                audio_source = True
                break
        if device['Connected'] and audio_source:
            pa = PulseAudioUtils()
            if not pa.connected:
                # PA not up yet: remember the device and add the item later.
                self.deferred.append(device)
                return []
            item = create_menuitem(_("Audio Profile"), "audio-card-symbolic")
            item.props.tooltip_text = _("Select audio profile for PulseAudio")
            if device['Address'] not in self.devices:
                self.query_pa(device, item)
            else:
                self.generate_menu(device, item)
        else:
            return []
        return [DeviceMenuItem(item, DeviceMenuItem.Group.ACTIONS, 300)]
298,673 | test settings append installed apps | from types import ModuleType
import pytest
from mock import MagicMock
from mock import patch
from kolibri.plugins.utils.settings import apply_settings
@pytest.fixture
def django_settings():
    """Yield a mocked ``django.conf.settings`` flagged as not yet configured."""
    with patch("django.conf.settings") as settings_mock:
        settings_mock.configured = False
        yield settings_mock
@pytest.fixture
def _apply_base_settings():
    """Stub Kolibri's base-settings application so only plugin logic runs."""
    with patch("kolibri.plugins.utils.settings._apply_base_settings") as apply_mock:
        yield apply_mock
def test_settings_error_if_configured(_apply_base_settings):
    """apply_settings refuses to run once Django settings are configured.

    The ``django_settings`` fixture is deliberately absent here, so the real
    (already configured) settings object triggers the RuntimeError.
    """
    with pytest.raises(RuntimeError):
        apply_settings("")
def test_settings_module_validate_string(_apply_base_settings, django_settings):
    """An empty module-path string is rejected with ValueError."""
    with pytest.raises(ValueError):
        apply_settings("")
def test_settings_module_validate_not_module_or_string(
    _apply_base_settings, django_settings
):
    """Anything that is neither a module nor a string raises TypeError."""
    with pytest.raises(TypeError):
        apply_settings(0)
def test_settings_module_validate_import_string(_apply_base_settings, django_settings):
    """A string argument is resolved via importlib.import_module."""
    with patch("kolibri.plugins.utils.settings.importlib") as import_mock:
        apply_settings("test")
        assert import_mock.import_module.called
def test_settings_raise_if_overwrite_base_setting(
    _apply_base_settings, django_settings
):
    """A plugin may not overwrite a setting already defined by the base module."""
    module_mock = ModuleType("module_mock")
    setting = "TEST"
    # Base module defines TEST=True; the plugin tries TEST=False.
    setattr(module_mock, setting, True)
    plugin_settings_mock = ModuleType("settings_mock")
    setattr(plugin_settings_mock, setting, False)
    plugin_mock = MagicMock(settings_module=plugin_settings_mock)
    plugins = [plugin_mock]
    with patch("kolibri.plugins.utils.settings.registered_plugins", plugins):
        with pytest.raises(ValueError):
            apply_settings(module_mock)
def test_settings_raise_if_tuple_setting_not_tuple(
    _apply_base_settings, django_settings
):
    """Tuple-typed settings (e.g. INSTALLED_APPS) must be given as tuples."""
    module_mock = ModuleType("module_mock")
    setting = "INSTALLED_APPS"
    setattr(module_mock, setting, ())
    plugin_settings_mock = ModuleType("settings_mock")
    # A bare string instead of a tuple must be rejected.
    setattr(plugin_settings_mock, setting, "app")
    plugin_mock = MagicMock(settings_module=plugin_settings_mock)
    plugins = [plugin_mock]
    with patch("kolibri.plugins.utils.settings.registered_plugins", plugins):
        with pytest.raises(ValueError):
            apply_settings(module_mock)
def test_settings_warn_if_two_plugins_set_setting(
    _apply_base_settings, django_settings
):
    """Two plugins providing the same setting triggers a warning, not an error."""
    module_mock = ModuleType("module_mock")
    setting = "TEST"
    plugin_settings_mock = ModuleType("settings_mock")
    setattr(plugin_settings_mock, setting, True)
    # Both plugins share the same settings module, hence the same setting.
    plugin_mock1 = MagicMock(settings_module=plugin_settings_mock, module_path="test1")
    plugin_mock2 = MagicMock(settings_module=plugin_settings_mock, module_path="test2")
    plugins = [plugin_mock1, plugin_mock2]
    with patch("kolibri.plugins.utils.settings.registered_plugins", plugins):
        with patch("kolibri.plugins.utils.settings.warnings") as warnings_mock:
            apply_settings(module_mock)
            assert warnings_mock.warn.called
def test_settings_set_setting(_apply_base_settings, django_settings):
    """A plugin-provided upper-case setting is copied onto the target module."""
    target = ModuleType("module_mock")
    plugin_settings = ModuleType("settings_mock")
    plugin_settings.TEST = False
    registered = [MagicMock(settings_module=plugin_settings)]
    with patch("kolibri.plugins.utils.settings.registered_plugins", registered):
        apply_settings(target)
    assert target.TEST is False
def test_settings_ignore_setting_if_lower_case(_apply_base_settings, django_settings):
    """Lower-case names in a plugin settings module are never copied over."""
    target = ModuleType("module_mock")
    plugin_settings = ModuleType("settings_mock")
    plugin_settings.test = False
    registered = [MagicMock(settings_module=plugin_settings)]
    with patch("kolibri.plugins.utils.settings.registered_plugins", registered):
        apply_settings(target)
    assert hasattr(target, "test") is False
def test_settings_append_tuple_setting(_apply_base_settings, django_settings):
    """Plugin tuple settings are appended after the base module's entries."""
    module_mock = ModuleType("module_mock")
    setting = "INSTALLED_APPS"
    setattr(module_mock, setting, ("first",))
    plugin_settings_mock = ModuleType("settings_mock")
    setattr(plugin_settings_mock, setting, ("second",))
    plugin_mock = MagicMock(settings_module=plugin_settings_mock)
    plugins = [plugin_mock]
    with patch("kolibri.plugins.utils.settings.registered_plugins", plugins):
        apply_settings(module_mock)
        # Base entries keep priority; plugin entries follow.
        assert getattr(module_mock, setting) == ("first", "second")
def test_settings_append_tuple_setting_when_not_exist(
    _apply_base_settings, django_settings
):
    """A tuple setting missing from the base module is created from the plugin."""
    module_mock = ModuleType("module_mock")
    setting = "INSTALLED_APPS"
    plugin_settings_mock = ModuleType("settings_mock")
    setattr(plugin_settings_mock, setting, ("second",))
    plugin_mock = MagicMock(settings_module=plugin_settings_mock)
    plugins = [plugin_mock]
    with patch("kolibri.plugins.utils.settings.registered_plugins", plugins):
        apply_settings(module_mock)
        assert getattr(module_mock, setting) == ("second",)
def METHOD_NAME(django_settings):
    """A plugin without a settings module is still added to INSTALLED_APPS."""
    module_mock = ModuleType("module_mock")
    plugin_mock = MagicMock(settings_module=None, module_path="test")
    plugins = [plugin_mock]
    with patch("kolibri.plugins.utils.settings.registered_plugins", plugins):
        apply_settings(module_mock)
        # The plugin's module_path becomes the app config's name.
        assert getattr(module_mock, "INSTALLED_APPS")[0].name == "test"
def test_settings_append_locale_path_external(django_settings):
    """External plugins contribute their locale path to LOCALE_PATHS."""
    module_mock = ModuleType("module_mock")
    # module_path without the "kolibri." prefix => external plugin.
    plugin_mock = MagicMock(settings_module=None, module_path="test")
    plugins = [plugin_mock]
    with patch("kolibri.plugins.utils.settings.registered_plugins", plugins), patch(
        "kolibri.plugins.utils.settings.i18n.get_installed_app_locale_path",
        return_value="test",
    ):
        apply_settings(module_mock)
        assert getattr(module_mock, "LOCALE_PATHS") == ("test",)
def test_settings_not_append_locale_path_internal(django_settings):
    """Internal (kolibri.*) plugins do not add to LOCALE_PATHS."""
    module_mock = ModuleType("module_mock")
    plugin_mock = MagicMock(settings_module=None, module_path="kolibri.test")
    plugins = [plugin_mock]
    with patch("kolibri.plugins.utils.settings.registered_plugins", plugins), patch(
        "kolibri.plugins.utils.settings.i18n.get_installed_app_locale_path",
        return_value="test",
    ), patch("kolibri.plugins.utils.settings.AppConfig"):
        apply_settings(module_mock)
        assert not hasattr(module_mock, "LOCALE_PATHS")
298,674 | libosmesa headers | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack.package import *
class Mesa18(AutotoolsPackage):
    """Mesa is an open-source implementation of the OpenGL specification
    - a system for rendering interactive 3D graphics."""

    homepage = "https://www.mesa3d.org"
    maintainers("v-dobrev", "ChristianTackeGSI")

    # Note that we always want to build from the git repo instead of a
    # tarball since the tarball has pre-generated files for certain versions
    # of LLVM while the git repo doesn't so it can adapt at build time to
    # whatever version of LLVM you're using.
    git = "https://gitlab.freedesktop.org/mesa/mesa.git"

    version("18.3.6", tag="mesa-18.3.6", commit="11049bcff86da8013a4f63bd68daf637e3af22f3")

    # Build-tool dependencies.
    depends_on("autoconf", type="build")
    depends_on("automake", type="build")
    depends_on("libtool", type="build")
    depends_on("m4", type="build")
    depends_on("pkgconfig", type="build")
    depends_on("binutils+plugins", when=(sys.platform != "darwin"), type="build")
    depends_on("bison", type="build")
    depends_on("flex", type="build")
    depends_on("gettext", type="build")
    depends_on("pkgconfig", type="build")
    depends_on("python@:3.8", type="build")  # https://github.com/spack/spack/issues/28219
    depends_on("py-mako@0.8.0:", type="build")
    # Run/link-time dependencies.
    depends_on("libxml2")
    depends_on("zlib-api")
    depends_on("expat")
    depends_on("ncurses+termlib")

    # Internal options
    variant("llvm", default=True, description="Enable LLVM.")
    variant(
        "swr",
        values=spack.variant.DisjointSetsOfValues(
            ("none",), ("auto",), ("avx", "avx2", "knl", "skx")
        )
        .with_non_feature_values("auto")
        .with_non_feature_values("none")
        .with_default("auto"),
        when="+llvm",
        description="Enable the SWR driver.",
    )
    # Front ends
    variant("osmesa", default=True, description="Enable the OSMesa frontend.")
    is_linux = sys.platform.startswith("linux")
    variant("glx", default=is_linux, description="Enable the GLX frontend.")
    # Additional backends
    variant("opengles", default=False, description="Enable OpenGL ES support.")
    # Provides
    provides("libglx", when="+glx")
    provides("libosmesa", when="+osmesa")
    # Variant dependencies
    depends_on("libllvm@6:10", when="+llvm")
    depends_on("libx11", when="+glx")
    depends_on("libxcb", when="+glx")
    depends_on("libxext", when="+glx")
    depends_on("glproto@1.4.14:", when="+glx")
    # Require at least 1 front-end
    conflicts("~osmesa ~glx")
    # Prevent an unnecessary xcb-dri dependency
    patch("autotools-x11-nodri.patch")
    # Backport Mesa MR#6053 to prevent multiply-defined symbols
    patch("multiple-symbols_hash.patch", when="@:20.1.4%gcc@10:")

    def setup_build_environment(self, env):
        # Mesa's build scripts must use Spack's python, not whatever is in PATH.
        env.set("PYTHON", self.spec["python"].command.path)

    def autoreconf(self, spec, prefix):
        which("autoreconf")("--force", "--verbose", "--install")

    def configure_args(self):
        """Assemble the ./configure option list from the active variants."""
        spec = self.spec
        # Baseline: shared-only build with every optional API/state tracker
        # explicitly disabled; variants below re-enable what was requested.
        args = [
            "LDFLAGS={0}".format(self.spec["ncurses"].libs.search_flags),
            "--enable-shared",
            "--disable-static",
            "--disable-libglvnd",
            "--disable-nine",
            "--disable-omx-bellagio",
            "--disable-omx-tizonia",
            "--disable-opencl",
            "--disable-opencl-icd",
            "--disable-va",
            "--disable-vdpau",
            "--disable-xa",
            "--disable-xvmc",
            "--disable-osmesa",
            "--with-vulkan-drivers=",
            "--disable-egl",
            "--disable-gbm",
            "--disable-dri",
            "--enable-opengl",
        ]
        args_platforms = []
        args_gallium_drivers = ["swrast"]
        args_dri_drivers = []
        if spec.target.family == "arm" or spec.target.family == "aarch64":
            args.append("--disable-libunwind")
        num_frontends = 0
        if "+osmesa" in spec:
            num_frontends += 1
            # NOTE: "--disable-osmesa" above disables classic OSMesa; the
            # gallium OSMesa state tracker is what gets enabled here.
            args.append("--enable-gallium-osmesa")
        else:
            args.append("--disable-gallium-osmesa")
        if "+glx" in spec:
            num_frontends += 1
            args.append("--enable-glx=gallium-xlib")
            args_platforms.append("x11")
        else:
            args.append("--disable-glx")
        if "+opengles" in spec:
            args.extend(["--enable-gles1", "--enable-gles2"])
        else:
            args.extend(["--disable-gles1", "--disable-gles2"])
        if num_frontends > 1:
            # Multiple front-ends must share one GL API dispatch library.
            args.append("--enable-shared-glapi")
        else:
            args.append("--disable-shared-glapi")
        if "+llvm" in spec:
            args.append("--enable-llvm")
            args.append("--with-llvm-prefix=%s" % spec["libllvm"].prefix)
            if "+llvm_dylib" in spec["libllvm"]:
                args.append("--enable-llvm-shared-libs")
            else:
                args.append("--disable-llvm-shared-libs")
        else:
            args.append("--disable-llvm")
        # SWR ISA selection: "auto" probes the build target's features,
        # otherwise honor the explicitly requested ISAs.
        args_swr_arches = []
        if "swr=auto" in spec:
            if "avx" in spec.target:
                args_swr_arches.append("avx")
            if "avx2" in spec.target:
                args_swr_arches.append("avx2")
            if "avx512f" in spec.target:
                if "avx512er" in spec.target:
                    args_swr_arches.append("knl")
                if "avx512bw" in spec.target:
                    args_swr_arches.append("skx")
        else:
            if "swr=avx" in spec:
                args_swr_arches.append("avx")
            if "swr=avx2" in spec:
                args_swr_arches.append("avx2")
            if "swr=knl" in spec:
                args_swr_arches.append("knl")
            if "swr=skx" in spec:
                args_swr_arches.append("skx")
        if args_swr_arches:
            args_gallium_drivers.append("swr")
            args.append("--with-swr-archs=" + ",".join(args_swr_arches))
        # Add the remaining list args
        args.append("--with-platforms=" + ",".join(args_platforms))
        args.append("--with-gallium-drivers=" + ",".join(args_gallium_drivers))
        args.append("--with-dri-drivers=" + ",".join(args_dri_drivers))
        return args

    @property
    def libs(self):
        """Locate the libraries this build provides, per platform/variant."""
        spec = self.spec
        libs_to_seek = set()
        if "platform=windows" in spec:
            libs_to_seek.add("opengl32")
            if "+osmesa" in spec:
                libs_to_seek.add("osmesa")
        else:
            libs_to_seek.add("libGL")
            if "+osmesa" in spec:
                libs_to_seek.add("libOSMesa")
            if "+glx" in spec:
                libs_to_seek.add("libGL")
            if "+opengles" in spec:
                libs_to_seek.add("libGLESv1_CM")
                libs_to_seek.add("libGLESv2")
        return find_libraries(
            list(libs_to_seek), root=self.spec.prefix, shared=True, recursive=True
        )

    @property
    def libglx_headers(self):
        return find_headers("GL/glx", root=self.spec.prefix.include, recursive=False)

    @property
    def libglx_libs(self):
        return find_libraries("libGL", root=self.spec.prefix, recursive=True)

    @property
    def METHOD_NAME(self):
        # Headers for the OSMesa off-screen rendering interface.
        return find_headers("GL/osmesa", root=self.spec.prefix.include, recursive=False)

    @property
    def libosmesa_libs(self):
        # Windows builds name the library without the "lib" prefix.
        if "platform=windows" in self.spec:
            lib_name = "osmesa"
        else:
            lib_name = "libOSMesa"
        return find_libraries(lib_name, root=self.spec.prefix, recursive=True)
298,675 | test global op | from io import StringIO
import pytest
from xdsl.dialects import arith, builtin, llvm
from xdsl.printer import Printer
from xdsl.utils.exceptions import VerifyException
def test_llvm_pointer_ops():
    """Alloca/load/store/null round-trip: result types and store attributes."""
    module = builtin.ModuleOp(
        [
            idx := arith.Constant.from_int_and_width(0, 64),
            ptr := llvm.AllocaOp(idx, builtin.i32),
            val := llvm.LoadOp(ptr),
            nullptr := llvm.NullOp(),
            alloc_ptr := llvm.AllocaOp(idx, elem_type=builtin.IndexType()),
            llvm.LoadOp(alloc_ptr),
            store := llvm.StoreOp(
                val, ptr, alignment=32, volatile=True, nontemporal=True
            ),
        ]
    )
    module.verify()
    assert len(alloc_ptr.res.uses) == 1
    assert ptr.size is idx.result
    assert isinstance(ptr.res.type, llvm.LLVMPointerType)
    assert ptr.res.type.type == builtin.i32
    assert isinstance(ptr.res.type.addr_space, builtin.NoneAttr)
    # The keyword is `volatile`, but the stored attribute name is `volatile_`.
    assert "volatile_" in store.attributes
    assert "nontemporal" in store.attributes
    assert "alignment" in store.attributes
    assert "ordering" in store.attributes
    assert isinstance(nullptr.nullptr.type, llvm.LLVMPointerType)
    assert isinstance(nullptr.nullptr.type.type, builtin.NoneAttr)
    assert isinstance(nullptr.nullptr.type.addr_space, builtin.NoneAttr)
def test_llvm_ptr_to_int_to_ptr():
    """inttoptr/ptrtoint keep operands linked and produce the expected types."""
    idx = arith.Constant.from_int_and_width(0, 64)
    ptr = llvm.IntToPtrOp(idx, ptr_type=builtin.i32)
    int_val = llvm.PtrToIntOp(ptr)
    assert ptr.input == idx.result
    assert isinstance(ptr.output.type, llvm.LLVMPointerType)
    assert ptr.output.type.type == builtin.i32
    assert int_val.input == ptr.output
    assert isinstance(int_val.output.type, builtin.IntegerType)
    # ptrtoint defaults to a 64-bit integer result.
    assert int_val.output.type.width.data == 64
def test_llvm_pointer_type():
    """Typed vs opaque pointer construction."""
    assert llvm.LLVMPointerType.typed(builtin.i64).is_typed()
    assert llvm.LLVMPointerType.typed(builtin.i64).type is builtin.i64
    assert isinstance(
        llvm.LLVMPointerType.typed(builtin.i64).addr_space, builtin.NoneAttr
    )
    assert not llvm.LLVMPointerType.opaque().is_typed()
    assert isinstance(llvm.LLVMPointerType.opaque().type, builtin.NoneAttr)
    assert isinstance(llvm.LLVMPointerType.opaque().addr_space, builtin.NoneAttr)
def test_llvm_getelementptr_op_invalid_construction():
    """GEP construction rejects invalid pointer arguments."""
    size = arith.Constant.from_int_and_width(1, 32)
    opaque_ptr = llvm.AllocaOp(size, builtin.i32, as_untyped_ptr=True)
    # check that passing an opaque pointer to GEP without a pointee type fails
    with pytest.raises(ValueError):
        llvm.GEPOp(
            opaque_ptr,
            indices=[1],
            result_type=llvm.LLVMPointerType.typed(builtin.i32),
        )
    # check that non-pointer arguments fail
    with pytest.raises(ValueError):
        llvm.GEPOp(
            size,
            indices=[1],
            result_type=llvm.LLVMPointerType.opaque(),
        )
def test_llvm_getelementptr_op():
    """GEP construction: typed pointer, opaque pointer, and mixed indices."""
    size = arith.Constant.from_int_and_width(1, 32)
    ptr = llvm.AllocaOp(size, builtin.i32)
    ptr_type = llvm.LLVMPointerType.typed(ptr.res.type)
    opaque_ptr = llvm.AllocaOp(size, builtin.i32, as_untyped_ptr=True)
    # check that construction with static-only offsets and inbounds attr works:
    gep1 = llvm.GEPOp.from_mixed_indices(
        ptr,
        indices=[1],
        result_type=ptr_type,
        inbounds=True,
    )
    assert "inbounds" in gep1.attributes
    assert gep1.result.type == ptr_type
    assert gep1.ptr == ptr.res
    assert "elem_type" not in gep1.attributes
    assert len(gep1.rawConstantIndices.data) == 1
    assert len(gep1.ssa_indices) == 0
    # check that construction with opaque pointer works:
    gep2 = llvm.GEPOp.from_mixed_indices(
        opaque_ptr,
        indices=[1],
        result_type=ptr_type,
        pointee_type=builtin.i32,
    )
    assert "elem_type" in gep2.attributes
    assert "inbounds" not in gep2.attributes
    assert gep2.result.type == ptr_type
    assert len(gep1.rawConstantIndices.data) == 1
    assert len(gep1.ssa_indices) == 0
    # check GEP with mixed args: an SSA value among the indices goes to
    # ssa_indices while the literal stays in rawConstantIndices.
    gep3 = llvm.GEPOp.from_mixed_indices(ptr, [1, size], ptr_type)
    assert len(gep3.rawConstantIndices.data) == 2
    assert len(gep3.ssa_indices) == 1
def test_array_type():
    """LLVMArrayType keeps its element count as an IntAttr plus element type."""
    arr = llvm.LLVMArrayType.from_size_and_type(10, builtin.i32)
    size_attr = arr.size
    assert isinstance(size_attr, builtin.IntAttr)
    assert size_attr.data == 10
    assert arr.type == builtin.i32
def test_linkage_attr():
    """A valid linkage string is stored as a StringAttr."""
    linkage = llvm.LinkageAttr("internal")
    assert isinstance(linkage.linkage, builtin.StringAttr)
    assert linkage.linkage.data == "internal"
def test_linkage_attr_unknown_str():
    """An unrecognised linkage string fails verification."""
    with pytest.raises(VerifyException):
        llvm.LinkageAttr("unknown")
def METHOD_NAME():
    """GlobalOp stores every constructor argument in the expected attribute."""
    global_op = llvm.GlobalOp(
        builtin.i32,
        "testsymbol",
        "internal",
        10,
        True,
        value=builtin.IntegerAttr(76, 32),
        alignment=8,
        unnamed_addr=0,
        section="test",
    )
    assert global_op.global_type == builtin.i32
    assert isinstance(global_op.sym_name, builtin.StringAttr)
    assert global_op.sym_name.data == "testsymbol"
    assert isinstance(global_op.section, builtin.StringAttr)
    assert global_op.section.data == "test"
    assert isinstance(global_op.addr_space, builtin.IntegerAttr)
    assert global_op.addr_space.value.data == 10
    assert isinstance(global_op.alignment, builtin.IntegerAttr)
    assert global_op.alignment.value.data == 8
    assert isinstance(global_op.unnamed_addr, builtin.IntegerAttr)
    assert global_op.unnamed_addr.value.data == 0
    assert isinstance(global_op.linkage, llvm.LinkageAttr)
    assert isinstance(global_op.value, builtin.IntegerAttr)
    assert global_op.value.value.data == 76
def test_addressof_op():
    """AddressOfOp wraps the symbol name and carries the requested pointer type."""
    ptr_type = llvm.LLVMPointerType.typed(builtin.i32)
    address_of = llvm.AddressOfOp("test", ptr_type)
    assert isinstance(address_of.global_name, builtin.SymbolRefAttr)
    assert address_of.global_name.root_reference.data == "test"
    assert address_of.result.type == ptr_type
def test_implicit_void_func_return():
    """A function type with no outputs defaults to the LLVM void type."""
    func_type = llvm.LLVMFunctionType([])
    assert isinstance(func_type.output, llvm.LLVMVoidType)
def test_calling_conv():
    """Valid calling conventions verify; invalid names raise VerifyException."""
    cconv = llvm.CallingConventionAttr("cc 11")
    cconv.verify()
    assert cconv.cconv_name == "cc 11"
    with pytest.raises(VerifyException, match='Invalid calling convention "nooo"'):
        llvm.CallingConventionAttr("nooo").verify()
def test_variadic_func():
    """A variadic void function prints with the trailing `...` marker."""
    func_type = llvm.LLVMFunctionType([], is_variadic=True)
    io = StringIO()
    p = Printer(stream=io)
    p.print_attribute(func_type)
    assert io.getvalue() == """!llvm.func<void (...)>"""
298,676 | test named temporay file context manager | # -*- coding: utf-8 -*-
import os
import copy
from unittest import mock
import numpy as np
import pytest
from requests import HTTPError
from obspy.core.util.base import (NamedTemporaryFile, get_dependency_version,
download_to_file, sanitize_filename,
create_empty_data_chunk, ComparingObject)
class TestUtilBase:
"""
Test suite for obspy.core.util.base
"""
def test_get_matplotlib_version(self):
"""
Tests for the get_matplotlib_version() function as it continues to
cause problems.
"""
versions = (("1.2.3", [1, 2, 3]), ("0.9.11", [0, 9, 11]),
("0.9.svn", [0, 9, 0]), ("1.1.1~rc1-1", [1, 1, 1]),
("1.2.x", [1, 2, 0]), ("1.3.1rc2", [1, 3, 1]))
for version_string, expected in versions:
with mock.patch('pkg_resources.get_distribution') as p:
class _D(object):
version = version_string
p.return_value = _D()
got = get_dependency_version('matplotlib')
assert expected == got
def METHOD_NAME(self):
"""
Tests the automatic closing/deleting of NamedTemporaryFile using the
context manager.
"""
content = b"burn after writing"
# write something to tempfile and check closing/deletion afterwards
with NamedTemporaryFile() as tf:
filename = tf.name
tf.write(content)
assert not os.path.exists(filename)
# write something to tempfile and check that it is written correctly
with NamedTemporaryFile() as tf:
filename = tf.name
tf.write(content)
tf.close()
with open(filename, 'rb') as fh:
tmp_content = fh.read()
assert content == tmp_content
assert not os.path.exists(filename)
# check that closing/deletion works even when nothing is done with file
with NamedTemporaryFile() as tf:
filename = tf.name
assert not os.path.exists(filename)
def test_mock_read_inventory_http_errors(self):
"""
Tests HTTP Error on 204, 400, and 500
"""
url = "http://obspy.org"
for response_tuple in [("204", "No Content"), ("400", "Bad Request"),
("500", "Internal Server Error")]:
code = response_tuple[0]
reason = response_tuple[1]
with mock.patch("requests.get") as mocked_get:
mocked_get.return_value.status_code = code
mocked_get.return_value.reason = reason
msg = "%s HTTP Error: %s for url: %s" % (code, reason, url)
with pytest.raises(HTTPError, match=msg):
download_to_file(url, None)
def test_sanitize_filename(self):
assert sanitize_filename("example.mseed") == \
"example.mseed"
assert sanitize_filename("Example.mseed") == \
"Example.mseed"
assert sanitize_filename("example.mseed?raw=True") == \
"example.mseedrawTrue"
assert sanitize_filename("Example.mseed?raw=true") == \
"Example.mseedrawtrue"
def test_create_empty_data_chunk(self):
out = create_empty_data_chunk(3, 'int', 10)
assert isinstance(out, np.ndarray)
# The default dtype for an integer (np.int_) is a `C long` which is
# only 32 bits on windows. Thus we have to allow both.
assert out.dtype in (np.int32, np.int64)
np.testing.assert_allclose(out, [10, 10, 10])
out = create_empty_data_chunk(6, np.complex128, 0)
assert isinstance(out, np.ndarray)
assert out.dtype == np.complex128
np.testing.assert_allclose(out, np.zeros(6, dtype=np.complex128))
# Fully masked output.
out = create_empty_data_chunk(3, 'f')
assert isinstance(out, np.ma.MaskedArray)
assert out.dtype == np.float32
np.testing.assert_allclose(out.mask, [True, True, True])
def test_comparing_object_eq(self):
    """Check ComparingObject equality against other types and deep copies."""
    co = ComparingObject()
    # Compare to other types
    assert co != 5
    # NOTE(review): `is not None` tests identity rather than __eq__;
    # presumably intentional, but `co != None` would exercise the
    # comparison operator — confirm.
    assert co is not None
    assert co != object()
    # Compare same type, different instance, with attributes
    co.at = 3
    deep_copy = copy.deepcopy(co)
    assert co == deep_copy
    # Diverging attribute values must break equality again.
    deep_copy.at = 0
    assert co != deep_copy
298,677 | sync resync | import datetime
import json
import core.exceptions as ex
import core.status
from .. import Sync, notify
from core.objects.svcdict import KEYS
from utilities.proc import justcall
# Driver identity under which this resource type is registered.
DRIVER_GROUP = "sync"
DRIVER_BASENAME = "radossnap"

# Configuration keywords accepted by sync.radossnap resources.
KEYWORDS = [
    {
        "keyword": "images",
        "convert": "list",
        "required": True,
        "text": "The rados image names handled by this sync resource. whitespace separated."
    },
]

# Make the driver's keywords known to the service configuration dictionary.
KEYS.register_driver(
    DRIVER_GROUP,
    DRIVER_BASENAME,
    name=__name__,
    keywords=KEYWORDS,
)
def driver_capabilities(node=None):
    """Report the sync capabilities available on this host.

    The driver is usable only when the ``rbd`` command is in PATH.
    """
    from utilities.proc import which
    if which("rbd"):
        return ["sync.radossnap"]
    return []
class SyncRadossnap(Sync):
    """Rotate timestamped snapshots for a set of rados block device images.

    Each update creates a "<rid>.<timestamp>" snapshot per image, then
    removes the snapshots created by previous runs.
    """

    def __init__(self,
                 images=None,
                 client_id=None,
                 keyring=None,
                 **kwargs):
        super(SyncRadossnap, self).__init__(type="sync.radossnap", **kwargs)
        if images is None:
            images = []
        self.fmt_label("snap", images)
        self.images = images
        # Normalize the ceph client id to the "client.<id>" form. The guard
        # on a falsy client_id fixes a crash: client_id defaults to None,
        # and None.startswith() raised AttributeError.
        if client_id and not client_id.startswith("client."):
            client_id = "client." + client_id
        self.client_id = client_id
        self.keyring = keyring
        # Lazy cache for list(); filled on first use, shared by all lookups.
        self.list_data = None
        # Timestamp suffix appended to the snapshot basename.
        self.date_fmt = "%Y-%m-%d.%H:%M:%S"

    def __str__(self):
        return "%s images=%s" % (
            super(SyncRadossnap, self).__str__(),
            ", ".join(self.images)
        )

    def recreate(self):
        """Snapshot every configured image, dropping the previous snapshots."""
        self.validate_image_fmt()
        for image in self.images:
            self._recreate(image)

    def _recreate(self, image):
        """Create a fresh timestamped snapshot of <image>, then remove the
        snapshots that existed before this run."""
        # Collect the pre-existing snapshots before creating the new one so
        # the new snapshot is never part of the removal list.
        previous_snaps = self._get_all(image)
        snapname = self.snap_basename() + datetime.datetime.now().strftime(self.date_fmt)
        cmd = self.rbd_cmd() + ['snap', 'create', image, '--snap', snapname]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.Error
        for old_snap in previous_snaps:
            self.rm(old_snap)

    def rm(self, image):
        """Remove a snapshot.

        Best-effort: the return code is deliberately ignored so one failed
        removal does not abort the whole rotation.
        """
        cmd = self.rbd_cmd() + ['snap', 'rm', image]
        ret, out, err = self.vcall(cmd)

    def unprotect(self, image):
        """Unprotect a snapshot, raising ex.Error on failure."""
        cmd = self.rbd_cmd() + ['snap', 'unprotect', image]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.Error

    def get_all(self):
        """Map each image name to the snapshot names owned by this resource."""
        data = {}
        for image in self.images:
            data[image] = self._get_all(image)
        return data

    def _get_all(self, image):
        """Return the "<image>@<rid>.<ts>" snapshot names of <image>."""
        data = self.list()
        retained = []
        prefix = image + "@" + self.snap_basename()
        for name in data:
            if not name.startswith(prefix):
                continue
            retained.append(name)
        return retained

    def get_last(self):
        """Map each image name to the (date, name) of its latest snapshot."""
        data = {}
        for image in self.images:
            data[image] = self._get_last(image)
        return data

    def _get_last(self, image):
        """Return (date, name) of the most recent snapshot of <image>.

        Returns (None, None) when the image has no parseable snapshot.
        """
        data = self.list()
        retained = []
        prefix = image + "@" + self.snap_basename()
        for name in data:
            if not name.startswith(prefix):
                continue
            try:
                date = datetime.datetime.strptime(name, prefix + self.date_fmt)
            except ValueError:
                # Not one of our timestamped snapshots: skip it.
                # (Was a bare ``except``, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                continue
            retained.append((date, name))
        if len(retained) == 0:
            return None, None
        last_date, last_name = sorted(retained)[-1]
        return last_date, last_name

    def rbd_cmd(self):
        """Base rbd command line, with client id and keyring options if set."""
        l = ["rbd"]
        if self.client_id:
            l += ["-n", self.client_id]
        if self.keyring:
            l += ["--keyring", self.keyring]
        return l

    def snap_basename(self):
        # Snapshot names are "<rid>.<timestamp>"; rid is provided by the
        # resource base class.
        return self.rid + "."

    def get_pools(self):
        """Return the set of rados pools referenced by the configured images."""
        l = set()
        for image in self.images:
            pool = image.split("/")[0]
            l.add(pool)
        return l

    def list(self):
        """Cached map of "pool/image[@snap]" -> `rbd ls -l` entry."""
        if self.list_data is not None:
            return self.list_data
        data = {}
        for pool in self.get_pools():
            data.update(self._list(pool))
        self.list_data = data
        return data

    def _list(self, pool):
        """Index the images and snapshots of <pool> by "pool/image[@snap]"."""
        cmd = self.rbd_cmd() + ["ls", "-l", pool, "--format", "json"]
        out, err, ret = justcall(cmd)
        data = {}
        try:
            _data = json.loads(out)
        except Exception as e:
            # Keep going with an empty listing; surface the parse error via
            # the resource status instead of crashing.
            self.status_log(str(e))
            _data = []
        for img_data in _data:
            idx = pool + "/" + img_data['image']
            if "snapshot" in img_data:
                idx += "@" + img_data['snapshot']
            data[idx] = img_data
        return data

    def sync_status(self, verbose=False):
        """Return WARN when an image has no snapshot or its latest snapshot
        is older than sync_max_delay; UP otherwise."""
        try:
            self.validate_image_fmt()
        except Exception as e:
            self.status_log(str(e))
            return core.status.WARN
        try:
            data = self.get_last()
        except Exception as e:
            self.status_log(str(e))
            return core.status.WARN
        nosnap = []
        expired = []
        ok = []
        for image in self.images:
            date, snapname = data[image]
            if date is None:
                nosnap.append(image)
            elif date < datetime.datetime.now() - datetime.timedelta(seconds=self.sync_max_delay):
                expired.append(image)
            else:
                ok.append(image)
        r = core.status.UP
        if len(nosnap) > 0:
            self.status_log("no snap found for images: " + ", ".join(nosnap))
            r = core.status.WARN
        if len(expired) > 0:
            self.status_log("snap too old for images: " + ", ".join(expired))
            r = core.status.WARN
        return r

    @notify
    def sync_update(self):
        self.recreate()

    def METHOD_NAME(self):
        # Resync is the same operation as an update: rotate the snapshots.
        self.recreate()

    def validate_image_fmt(self):
        """Raise ex.Error unless every image is in "pool/image" form."""
        l = []
        for image in self.images:
            if image.count("/") != 1:
                l.append(image)
        if len(l) > 0:
            raise ex.Error("wrong format (expected pool/image): " + ", ".join(l))

    def fmt_label(self, t, l):
        """Build the resource label, truncated to at most 80 characters."""
        self.label = t + " rados %s" % ', '.join(l)
        if len(self.label) > 80:
            self.label = self.label[:76] + "..."
298,678 | tg emulation igmp querier control | import sys
class TGStubs(object):
    """Base class of no-op stubs for traffic-generator driver APIs.

    Every entry point logs an error naming the method that a concrete
    driver should have overridden and returns an empty dict, so that
    callers indexing into the result fail loudly instead of silently.
    """
    def __init__(self, logger):
        self.logger = logger
    def override(self, name=None):
        """Log that *name* (default: the caller's function name) lacks an override."""
        # sys._getframe(1) resolves the immediate caller, so each stub below
        # reports its own name without passing it explicitly.
        name = name or sys._getframe(1).f_code.co_name
        self.logger.error("{} should be overriden".format(name))
        return {}
    # --- generic control/status ---
    def clean_all(self):
        return self.override()
    def show_status(self):
        return self.override()
    def tg_interface_handle(self, ret_ds):
        return self.override()
    def tg_interface_config(self, *args, **kwargs):
        return self.override()
    def tg_save_xml(self, **kwargs):
        return self.override()
    def tg_test_control(self, **kwargs):
        return self.override()
    def tg_packet_control(self, *args, **kwargs):
        return self.override()
    def tg_traffic_control(self, *args, **kwargs):
        return self.override()
    def tg_topology_config(self, **kwargs):
        return self.override()
    # --- statistics ---
    def tg_packet_stats(self, *args, **kwargs):
        return self.override()
    def tg_traffic_stats(self, *args, **kwargs):
        return self.override()
    def tg_interface_stats(self, **kwargs):
        return self.override()
    # --- packet capture configuration ---
    def tg_packet_config_buffers(self, **kwargs):
        return self.override()
    def tg_packet_config_filter(self, **kwargs):
        return self.override()
    def tg_packet_config_triggers(self, **kwargs):
        return self.override()
    def tg_convert_porthandle_to_vport(self, **kwargs):
        return self.override()
    def tg_protocol_info(self, **kwargs):
        return self.override()
    # --- BGP emulation ---
    def tg_withdraw_bgp_routes(self, route_handle):
        return self.override()
    def tg_readvertise_bgp_routes(self, handle, route_handle):
        return self.override()
    def tg_emulation_bgp_config(self, **kwargs):
        return self.override()
    def tg_emulation_bgp_control(self, **kwargs):
        return self.override()
    def tg_emulation_bgp_route_config(self, **kwargs):
        return self.override()
    # --- OSPF emulation ---
    def tg_emulation_ospf_config(self, **kwargs):
        return self.override()
    def tg_emulation_ospf_control(self, **kwargs):
        return self.override()
    def tg_emulation_ospf_route_config(self, **kwargs):
        return self.override()
    def tg_emulation_ospf_lsa_config(self, **kwargs):
        return self.override()
    def tg_emulation_ospf_network_group_config(self, **kwargs):
        return self.override()
    def tg_emulation_ospf_topology_route_config(self, **kwargs):
        return self.override()
    # --- IGMP/MLD/multicast emulation ---
    def tg_emulation_igmp_config(self, **kwargs):
        return self.override()
    def tg_emulation_igmp_group_config(self, **kwargs):
        return self.override()
    def tg_emulation_igmp_querier_config(self, **kwargs):
        return self.override()
    def tg_emulation_igmp_control(self, **kwargs):
        return self.override()
    def METHOD_NAME(self, **kwargs):
        return self.override()
    def tg_emulation_mld_querier_control(self, **kwargs):
        return self.override()
    def tg_emulation_mld_control(self, **kwargs):
        return self.override()
    def tg_emulation_multicast_source_config(self, **kwargs):
        return self.override()
    def tg_multivalue_config(self, **kwargs):
        return self.override()
    # --- DHCP emulation ---
    def tg_emulation_dhcp_config(self, **kwargs):
        return self.override()
    def tg_emulation_dhcp_control(self, **kwargs):
        return self.override()
    def tg_emulation_dhcp_group_config(self, **kwargs):
        return self.override()
    def tg_emulation_dhcp_server_config(self, **kwargs):
        return self.override()
    def tg_emulation_dhcp_server_control(self, **kwargs):
        return self.override()
    def tg_emulation_dhcp_server_stats(self, **kwargs):
        return self.override()
    def tg_emulation_dhcp_server_relay_agent_config(self, **kwargs):
        return self.override()
    def tg_emulation_dhcp_stats(self, **kwargs):
        return self.override()
    # --- 802.1X (dot1x) emulation ---
    def tg_emulation_dotonex_config(self, **kwargs):
        return self.override()
    def tg_emulation_dotonex_control(self, **kwargs):
        return self.override()
    def tg_emulation_dotonex_info(self, **kwargs):
        return self.override()
    # --- PTP emulation ---
    def tg_ptp_over_ip_control(self, **kwargs):
        return self.override()
    def tg_ptp_over_mac_control(self, **kwargs):
        return self.override()
    def tg_ptp_over_ip_config(self, **kwargs):
        return self.override()
    def tg_ptp_over_mac_config(self, **kwargs):
        return self.override()
    def tg_ptp_over_ip_stats(self, **kwargs):
        return self.override()
    def tg_ptp_over_mac_stats(self, **kwargs):
        return self.override()
    # --- IPv6 autoconfiguration ---
    def tg_emulation_ipv6_autoconfig(self, **kwargs):
        return self.override()
    def tg_emulation_ipv6_autoconfig_control(self, **kwargs):
        return self.override()
298,679 | run llama test | import argparse
import os
import time
import torch
from torch.profiler import ProfilerActivity, profile, record_function
from transformers import LlamaForCausalLM, LlamaTokenizer
import colossalai
from colossalai.inference.tensor_parallel.engine import TPInferEngine
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer import ShardConfig
from colossalai.testing import clear_cache_before_run, rerun_if_address_is_in_use, spawn
os.environ['TRANSFORMERS_NO_ADVISORY_WARNINGS'] = 'true'
def init_to_get_rotary(self, base=10000):
    """Precompute fp16 rotary-embedding cos/sin caches on *self*.

    *self* is expected to be a llama model module exposing a HF-style
    ``config``; results are stored as CUDA tensors in ``self._cos_cached``
    and ``self._sin_cached``.
    """
    self.config.head_dim_ = self.config.hidden_size // self.config.num_attention_heads
    if not hasattr(self.config, "rope_scaling"):
        rope_scaling_factor = 1.0
    else:
        # NOTE(review): assumes rope_scaling exposes a ``.factor`` attribute;
        # stock transformers configs store a dict here — confirm the config type.
        rope_scaling_factor = self.config.rope_scaling.factor if self.config.rope_scaling is not None else 1.0
    if hasattr(self.config, "max_sequence_length"):
        max_seq_len = self.config.max_sequence_length
    elif hasattr(self.config, "max_position_embeddings"):
        max_seq_len = self.config.max_position_embeddings * rope_scaling_factor
    else:
        # Fall back to the historical llama default context length.
        max_seq_len = 2048 * rope_scaling_factor
    base = float(base)
    inv_freq = 1.0 / (base**(torch.arange(0, self.config.head_dim_, 2, device="cpu", dtype=torch.float32) /
                             self.config.head_dim_))
    # The extra 1024 * 64 positions extend the cache beyond max_seq_len —
    # presumably headroom for generation past the configured limit; confirm.
    t = torch.arange(max_seq_len + 1024 * 64, device="cpu", dtype=torch.float32) / rope_scaling_factor
    freqs = torch.outer(t, inv_freq)
    self._cos_cached = torch.cos(freqs).to(torch.float16).cuda()
    self._sin_cached = torch.sin(freqs).to(torch.float16).cuda()
    return
def print_perf_stats(latency_set, config, bs, warmup=3):
# trim warmup queries
latency_set = list(latency_set)
latency_set = latency_set[warmup:]
count = len(latency_set)
if count > 0:
latency_set.sort()
avg = sum(latency_set) / count
num_layers = getattr(config, "num_layers", config.num_hidden_layers)
num_parameters = num_layers * config.hidden_size * config.hidden_size * 12
num_bytes = 2
print("Avg Per Token Latency: {0:8.2f} ms".format(avg * 1000))
print("Avg BW: {0:8.2f} GB/s".format(1 / avg * num_parameters * num_bytes / 1e9))
print("Avg flops: {0:8.2f} TFlops/s".format(1 / avg * num_parameters * num_bytes * bs / 1e12))
def METHOD_NAME(args):
    """Benchmark llama inference latency with TPInferEngine, then profile one run."""
    llama_model_path = args.path
    max_batch_size = args.batch_size
    max_input_len = args.input_len
    max_output_len = args.output_len
    tokenizer = LlamaTokenizer.from_pretrained(llama_model_path)
    # Llama tokenizers ship without a pad token; reuse unk for padding.
    tokenizer.pad_token_id = tokenizer.unk_token_id
    model = LlamaForCausalLM.from_pretrained(llama_model_path, pad_token_id=tokenizer.eos_token_id)
    init_to_get_rotary(model.model, base=10000)
    model = model.half()
    model_config = model.config
    # Enable tensor parallelism only when more than one rank is requested.
    shard_config = ShardConfig(enable_tensor_parallelism=True if args.tp_size > 1 else False, inference_only=True)
    infer_engine = TPInferEngine(model, shard_config, max_batch_size, max_input_len, max_output_len)
    generate_kwargs = dict(max_new_tokens=max_output_len, do_sample=False)
    # Random token ids stand in for real prompts; content does not matter
    # for a latency benchmark.
    input_tokens = {
        "input_ids": torch.randint(1, 1000, (max_batch_size, max_input_len), device='cuda'),
        "attention_mask": torch.ones((max_batch_size, max_input_len), device='cuda')
    }
    iters = 10
    times = []
    for i in range(iters):
        torch.cuda.synchronize()
        start = time.time()
        outputs = infer_engine.generate(input_tokens, **generate_kwargs)
        torch.cuda.synchronize()
        end = time.time()
        out_len = outputs.shape[1]
        print("generation time {} s".format(str(end - start)))
        # Normalize by the number of generated (non-prompt) tokens.
        times.append((end - start) / (out_len - max_input_len))
    print("outputs, ", len(outputs))
    print_perf_stats(times, model_config, max_batch_size)
    # One extra profiled run for a kernel-level time breakdown.
    with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
        with record_function("model_inference"):
            torch.cuda.synchronize()
            outputs = infer_engine.generate(input_tokens, **generate_kwargs)
            torch.cuda.synchronize()
    print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
def check_llama(rank, world_size, port, args):
    """Per-process entry point: init the distributed env, then run the benchmark."""
    disable_existing_loggers()
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    METHOD_NAME(args)
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_llama(args):
    """Spawn one benchmark process per tensor-parallel rank."""
    spawn(check_llama, args.tp_size, args=args)
if __name__ == "__main__":
    # Command-line entry point: parse benchmark settings and launch.
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--path', type=str, help='Model path', required=True)
    parser.add_argument('-tp', '--tp_size', type=int, default=1, help='Tensor parallel size')
    parser.add_argument('-b', '--batch_size', type=int, default=16, help='Maximum batch size')
    parser.add_argument('--input_len', type=int, default=1024, help='Maximum input length')
    parser.add_argument('--output_len', type=int, default=128, help='Maximum output length')
    args = parser.parse_args()
    test_llama(args)
298,680 | logon | """Implementation of magic functions for IPython's own logging.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import sys
# Our own packages
from IPython.core.magic import Magics, magics_class, line_magic
from warnings import warn
from IPython.utils.py3compat import str_to_unicode
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class LoggingMagics(Magics):
    """Magics related to all logging machinery."""

    @line_magic
    def logstart(self, parameter_s=''):
        """Start logging anywhere in a session.

        %logstart [-o|-r|-t] [log_name [log_mode]]

        If no name is given, it defaults to a file named 'ipython_log.py' in your
        current directory, in 'rotate' mode (see below).

        '%logstart name' saves to file 'name' in 'backup' mode. It saves your
        history up to that point and then continues logging.

        %logstart takes a second optional parameter: logging mode. This can be one
        of (note that the modes are given unquoted):

        append
            Keep logging at the end of any existing file.

        backup
            Rename any existing file to name~ and start name.

        global
            Append to a single logfile in your home directory.

        over
            Overwrite any existing log.

        rotate
            Create rotating logs: name.1~, name.2~, etc.

        Options:

        -o
            log also IPython's output. In this mode, all commands which
            generate an Out[NN] prompt are recorded to the logfile, right after
            their corresponding input line. The output lines are always
            prepended with a '#[Out]# ' marker, so that the log remains valid
            Python code.

            Since this marker is always the same, filtering only the output from
            a log is very easy, using for example a simple awk call::

                awk -F'#\\[Out\\]# ' '{if($2) {print $2}}' ipython_log.py

        -r
            log 'raw' input. Normally, IPython's logs contain the processed
            input, so that user lines are logged in their final form, converted
            into valid Python. For example, %Exit is logged as
            _ip.magic("Exit"). If the -r flag is given, all input is logged
            exactly as typed, with no transformations applied.

        -t
            put timestamps before each input line logged (these are put in
            comments).
        """

        opts, par = self.parse_options(parameter_s, 'ort')
        log_output = 'o' in opts
        log_raw_input = 'r' in opts
        timestamp = 't' in opts

        logger = self.shell.logger

        # if no args are given, the defaults set in the logger constructor by
        # ipython remain valid
        if par:
            try:
                logfname, logmode = par.split()
            except ValueError:
                # A single token: treat it as the filename and default the
                # mode. (Previously a bare ``except`` which also swallowed
                # KeyboardInterrupt/SystemExit.)
                logfname = par
                logmode = 'backup'
        else:
            logfname = logger.logfname
            logmode = logger.logmode
        # put logfname into rc struct as if it had been called on the command
        # line, so it ends up saved in the log header Save it in case we need
        # to restore it...
        old_logfile = self.shell.logfile
        if logfname:
            logfname = os.path.expanduser(logfname)
        self.shell.logfile = logfname

        loghead = u'# IPython log file\n\n'
        try:
            logger.logstart(logfname, loghead, logmode, log_output, timestamp,
                            log_raw_input)
        except Exception:
            # Restore the previous logfile on failure. Narrowed from a bare
            # ``except`` so interrupts still propagate.
            self.shell.logfile = old_logfile
            warn("Couldn't start log: %s" % sys.exc_info()[1])
        else:
            # log input history up to this point, optionally interleaving
            # output if requested
            if timestamp:
                # disable timestamping for the previous history, since we've
                # lost those already (no time machine here).
                logger.timestamp = False

            if log_raw_input:
                input_hist = self.shell.history_manager.input_hist_raw
            else:
                input_hist = self.shell.history_manager.input_hist_parsed

            if log_output:
                log_write = logger.log_write
                output_hist = self.shell.history_manager.output_hist
                for n in range(1, len(input_hist) - 1):
                    log_write(input_hist[n].rstrip() + u'\n')
                    if n in output_hist:
                        log_write(str_to_unicode(repr(output_hist[n])), 'output')
            else:
                logger.log_write(u'\n'.join(input_hist[1:]))
                logger.log_write(u'\n')
            if timestamp:
                # re-enable timestamping
                logger.timestamp = True

            print ('Activating auto-logging. '
                   'Current session state plus future input saved.')
            logger.logstate()

    @line_magic
    def logstop(self, parameter_s=''):
        """Fully stop logging and close log file.

        In order to start logging again, a new %logstart call needs to be made,
        possibly (though not necessarily) with a new filename, mode and other
        options."""
        self.shell.logger.logstop()

    @line_magic
    def logoff(self, parameter_s=''):
        """Temporarily stop logging.

        You must have previously started logging."""
        self.shell.logger.switch_log(0)

    @line_magic
    def METHOD_NAME(self, parameter_s=''):
        """Restart logging.

        This function is for restarting logging which you've temporarily
        stopped with %logoff. For starting logging for the first time, you
        must use the %logstart function, which allows you to specify an
        optional log filename."""
        self.shell.logger.switch_log(1)

    @line_magic
    def logstate(self, parameter_s=''):
        """Print the status of the logging system."""
        self.shell.logger.logstate()
298,681 | system data | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionsForEDMResult',
'AwaitableGetPrivateEndpointConnectionsForEDMResult',
'get_private_endpoint_connections_for_edm',
'get_private_endpoint_connections_for_edm_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionsForEDMResult:
    """
    The Private Endpoint Connection resource.
    """
    # NOTE: generated by pulumi — keep changes to comments only and
    # regenerate rather than hand-editing the logic.
    def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, METHOD_NAME=None, type=None):
        # Each argument is type-checked for its expected primitive shape and
        # stored via pulumi.set so the output-type machinery can read it back.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", METHOD_NAME)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The resource of private end point.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
        """
        A collection of information about the state of the connection between service consumer and provider.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private endpoint connection resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="systemData")
    def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
        """
        Required property for system data
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionsForEDMResult(GetPrivateEndpointConnectionsForEDMResult):
    # Pulumi-generated shim: makes the result awaitable while resolving
    # immediately (the ``if False: yield`` marks __await__ as a generator).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetPrivateEndpointConnectionsForEDMResult(
            id=self.id,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            METHOD_NAME=self.METHOD_NAME,
            type=self.type)
def get_private_endpoint_connections_for_edm(private_endpoint_connection_name: Optional[str] = None,
                                             resource_group_name: Optional[str] = None,
                                             resource_name: Optional[str] = None,
                                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionsForEDMResult:
    """
    Gets the specified private endpoint connection associated with the service.

    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group that contains the service instance.
    :param str resource_name: The name of the service instance.
    """
    # Pulumi-generated invoke wrapper: marshal the arguments and call the
    # provider function synchronously.
    __args__ = dict()
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:m365securityandcompliance/v20210325preview:getPrivateEndpointConnectionsForEDM', __args__, opts=opts, typ=GetPrivateEndpointConnectionsForEDMResult).value

    return AwaitableGetPrivateEndpointConnectionsForEDMResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
        private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        METHOD_NAME=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connections_for_edm)
def get_private_endpoint_connections_for_edm_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                                                    resource_group_name: Optional[pulumi.Input[str]] = None,
                                                    resource_name: Optional[pulumi.Input[str]] = None,
                                                    opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionsForEDMResult]:
    """
    Gets the specified private endpoint connection associated with the service.

    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group that contains the service instance.
    :param str resource_name: The name of the service instance.
    """
    # Body intentionally empty: lift_output_func forwards to the plain
    # function above and wraps the result in a pulumi Output.
    ...
298,682 | eject hook | from collections import deque
from types import FunctionType
from typing import Any
from typing import Callable
from typing import Deque
from typing import List
from typing import Tuple
from bytecode import Bytecode
from bytecode import Instr
from .compat import PYTHON_VERSION_INFO as PY
HookType = Callable[[Any], Any]
HookInfoType = Tuple[HookType, int, Any]
HOOK_ARG_PREFIX = "_hook_arg"
class InvalidLine(Exception):
    """
    Raised when trying to inject a hook on an invalid line, e.g. a comment
    or a blank line (no bytecode is emitted for such lines).
    """
def _inject_hook(code, hook, lineno, arg):
    # type: (Bytecode, HookType, int, Any) -> None
    """Inject a hook at the given line number inside an abstract code object.

    The hook is called with the given argument, which is also used as an
    identifier for the hook itself. This should be kept in case the hook needs
    to be removed.

    Raises InvalidLine when *lineno* carries no bytecode (blank/comment line).
    """
    # DEV: In general there are no guarantees for bytecode to be "linear",
    # meaning that a line number can occur multiple times. We need to find all
    # occurrences and inject the hook at each of them. An example of when this
    # happens is with finally blocks, which are duplicated at the end of the
    # bytecode.
    locs = deque()  # type: Deque[int]
    last_lineno = None
    for i, instr in enumerate(code):
        try:
            # Only the first instruction of each run of a line is a target.
            if instr.lineno == last_lineno:
                continue
            last_lineno = instr.lineno
            if instr.lineno == lineno:
                # appendleft so the insertions below are applied back-to-front
                # and do not invalidate earlier offsets.
                locs.appendleft(i)
        except AttributeError:
            # pseudo-instruction (e.g. label)
            pass
    if not locs:
        raise InvalidLine("Line %d does not exist or is either blank or a comment" % lineno)
    # DEV: This is the bytecode equivalent of
    # >>> hook(arg)
    # Additionally, we must discard the return value (top of the stack) to
    # restore the stack to the state prior to the call.
    for i in locs:
        if PY < (3, 11):
            code[i:i] = Bytecode(
                [
                    Instr("LOAD_CONST", hook, lineno=lineno),
                    Instr("LOAD_CONST", arg, lineno=lineno),
                    Instr("CALL_FUNCTION", 1, lineno=lineno),
                    Instr("POP_TOP", lineno=lineno),
                ]
            )
        elif PY >= (3, 12):
            code[i:i] = Bytecode(
                [
                    Instr("PUSH_NULL", lineno=lineno),
                    Instr("LOAD_CONST", hook, lineno=lineno),
                    Instr("LOAD_CONST", arg, lineno=lineno),
                    Instr("CALL", 1, lineno=lineno),
                    Instr("POP_TOP", lineno=lineno),
                ]
            )
        else:
            # Python 3.11
            code[i:i] = Bytecode(
                [
                    Instr("PUSH_NULL", lineno=lineno),
                    Instr("LOAD_CONST", hook, lineno=lineno),
                    Instr("LOAD_CONST", arg, lineno=lineno),
                    Instr("PRECALL", 1, lineno=lineno),
                    Instr("CALL", 1, lineno=lineno),
                    Instr("POP_TOP", lineno=lineno),
                ]
            )
_INJECT_HOOK_OPCODES = ["PUSH_NULL", "LOAD_CONST", "LOAD_CONST", "PRECALL", "CALL", "POP_TOP"]
if PY < (3, 11):
_INJECT_HOOK_OPCODES = ["LOAD_CONST", "LOAD_CONST", "CALL_FUNCTION", "POP_TOP"]
elif PY >= (3, 12):
_INJECT_HOOK_OPCODES = ["PUSH_NULL", "LOAD_CONST", "LOAD_CONST", "CALL", "POP_TOP"]
_INJECT_HOOK_OPCODE_POS = 0 if PY < (3, 11) else 1
_INJECT_ARG_OPCODE_POS = 1 if PY < (3, 11) else 2
def _eject_hook(code, hook, line, arg):
    # type: (Bytecode, HookType, int, Any) -> None
    """Eject a hook from the abstract code object at the given line number.

    The hook is identified by its argument. This ensures that only the right
    hook is ejected.

    Raises InvalidLine when no injected hook matches (*line*, *hook*, *arg*).
    """
    locs = deque()  # type: Deque[int]
    for i, instr in enumerate(code):
        try:
            # DEV: We look at the expected opcode pattern to match the injected
            # hook and we also test for the expected opcode arguments
            if (
                instr.lineno == line
                and code[i + _INJECT_HOOK_OPCODE_POS].arg == hook  # bound methods don't like identity comparisons
                and code[i + _INJECT_ARG_OPCODE_POS].arg is arg
                and [code[_].name for _ in range(i, i + len(_INJECT_HOOK_OPCODES))] == _INJECT_HOOK_OPCODES
            ):
                # appendleft so the deletions below run back-to-front and do
                # not shift offsets still to be processed.
                locs.appendleft(i)
        except AttributeError:
            # pseudo-instruction (e.g. label)
            pass
        except IndexError:
            # The matching window ran past the end of the code object.
            pass
    if not locs:
        raise InvalidLine("Line %d does not contain a hook" % line)
    for i in locs:
        del code[i : i + len(_INJECT_HOOK_OPCODES)]
def _function_with_new_code(f, abstract_code):
f.__code__ = abstract_code.to_code()
return f
def inject_hooks(f, hooks):
    # type: (FunctionType, List[HookInfoType]) -> List[HookInfoType]
    """Bulk-inject a list of hooks into a function.

    *hooks* is a list of (hook, line, arg) tuples; *arg* doubles as the
    identifier needed for later ejection. Returns the tuples that could not
    be injected. The function's code object is swapped only when at least
    one injection succeeded.
    """
    abstract = Bytecode.from_code(f.__code__)
    failures = []
    for hook, line, arg in hooks:
        try:
            _inject_hook(abstract, hook, line, arg)
        except InvalidLine:
            failures.append((hook, line, arg))
    if len(failures) < len(hooks):
        _function_with_new_code(f, abstract)
    return failures
def eject_hooks(f, hooks):
    # type: (FunctionType, List[HookInfoType]) -> List[HookInfoType]
    """Bulk-eject a list of hooks from a function.

    *hooks* is a list of (hook, line, arg) tuples identifying each injected
    hook. Returns the tuples that could not be ejected. The function's code
    object is swapped only when at least one ejection succeeded.
    """
    abstract = Bytecode.from_code(f.__code__)
    failures = []
    for hook, line, arg in hooks:
        try:
            _eject_hook(abstract, hook, line, arg)
        except InvalidLine:
            failures.append((hook, line, arg))
    if len(failures) < len(hooks):
        _function_with_new_code(f, abstract)
    return failures
def inject_hook(f, hook, line, arg):
    # type: (FunctionType, HookType, int, Any) -> FunctionType
    """Inject *hook* into *f* at *line*, identified by *arg*.

    Keep *arg* around: it is the identifier required to eject the hook later.
    """
    abstract = Bytecode.from_code(f.__code__)
    _inject_hook(abstract, hook, line, arg)
    return _function_with_new_code(f, abstract)
def METHOD_NAME(f, hook, line, arg):
    # type: (FunctionType, HookType, int, Any) -> FunctionType
    """Eject from *f* the hook identified by *line* and *arg*."""
    abstract = Bytecode.from_code(f.__code__)
    _eject_hook(abstract, hook, line, arg)
    return _function_with_new_code(f, abstract)
298,683 | create if not exist | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""train and evaluate"""
import tqdm
import json
import numpy as np
import sys
import os
import paddle.fluid as F
from tensorboardX import SummaryWriter
from ogb.nodeproppred import Evaluator
from ogb.nodeproppred import NodePropPredDataset
def multi_device(reader, dev_count):
    """Regroup a batch stream for multi-device feeding.

    With a single device the batches pass through unchanged.  With
    ``dev_count`` devices, consecutive batches are grouped into lists of
    length ``dev_count``; a trailing incomplete group is dropped, since
    every device must receive a batch.
    """
    if dev_count == 1:
        yield from reader
        return
    bucket = []
    for batch in reader:
        bucket.append(batch)
        if len(bucket) == dev_count:
            yield bucket
            bucket = []
class OgbEvaluator(object):
    """Accuracy evaluator backed by the official ogbn-arxiv ``Evaluator``."""

    def __init__(self):
        # The dataset is loaded only to record the total node count; the
        # labels returned alongside the graph are not needed here.
        dataset_name = "ogbn-arxiv"
        graph, _label = NodePropPredDataset(name=dataset_name)[0]
        self.num_nodes = graph["num_nodes"]
        self.ogb_evaluator = Evaluator(name=dataset_name)

    def eval(self, scores, labels, phase):
        """Return ``{'<phase>_acc': accuracy}`` for logits vs. labels."""
        predictions = np.argmax(scores, axis=1).reshape([-1, 1])
        accuracy = self.ogb_evaluator.eval({
            'y_true': labels,
            'y_pred': predictions,
        })['acc']
        return {'%s_acc' % phase: accuracy}
def evaluate(model, valid_exe, valid_ds, valid_prog, dev_count, evaluator,
             phase, full_batch):
    """Run one full pass over ``valid_ds`` and return the evaluator metrics."""
    data_iter = _full_batch_wapper(valid_ds) if full_batch else valid_ds.generator
    all_scores = []
    all_labels = []
    fetch_list = [model.logits, model.labels]
    for feed_dict in tqdm.tqdm(
            multi_device(data_iter(), dev_count), desc='evaluating'):
        if dev_count > 1:
            batch_scores, batch_labels = valid_exe.run(
                feed=feed_dict, fetch_list=fetch_list)
        else:
            batch_scores, batch_labels = valid_exe.run(
                valid_prog, feed=feed_dict, fetch_list=fetch_list)
        all_scores.append(batch_scores)
        all_labels.append(batch_labels)
    return evaluator.eval(np.vstack(all_scores), np.vstack(all_labels), phase)
def METHOD_NAME(path):
    """Ensure that the parent directory of ``path`` exists.

    Any missing intermediate directories are created.  The previous
    exists-then-makedirs sequence had a TOCTOU race (another process could
    create the directory between the check and the call) and raised
    ``FileNotFoundError`` for a bare filename with an empty dirname; both
    cases are handled here.
    """
    basedir = os.path.dirname(path)
    if basedir:
        # exist_ok makes the call race-free and idempotent.
        os.makedirs(basedir, exist_ok=True)
def _full_batch_wapper(ds):
feed_dict = {}
feed_dict["batch_nodes"] = np.array(ds.nodes_idx, dtype="int64")
feed_dict["labels"] = np.array(ds.labels, dtype="int64")
def r():
yield feed_dict
return r
def train_and_evaluate(exe,
                       train_exe,
                       valid_exe,
                       train_ds,
                       valid_ds,
                       test_ds,
                       train_prog,
                       valid_prog,
                       full_batch,
                       model,
                       metric,
                       epoch=20,
                       dev_count=1,
                       train_log_step=5,
                       eval_step=10000,
                       evaluator=None,
                       output_path=None):
    """Run the training loop with periodic evaluation and checkpointing.

    Trains for ``epoch`` epochs, logging metrics to TensorBoard under
    ``<output_path>/log`` every ``train_log_step`` steps, evaluating on the
    valid/test sets every ``eval_step`` steps and at every epoch end, and
    saving persistables to ``<output_path>/checkpoint`` (plus the winning
    metrics to ``best.txt``) whenever ``valid_acc`` improves.
    """
    global_step = 0
    log_path = os.path.join(output_path, "log")
    # Make sure the log directory exists before SummaryWriter opens it.
    METHOD_NAME(log_path)
    writer = SummaryWriter(log_path)
    # Best valid_acc seen so far; 0 so any real accuracy triggers a save.
    best_model = 0
    if full_batch:
        train_iter = _full_batch_wapper(train_ds)
    else:
        train_iter = train_ds.generator
    for e in range(epoch):
        ret_sum_loss = 0
        per_step = 0
        scores = []
        labels = []
        for feed_dict in tqdm.tqdm(
                multi_device(train_iter(), dev_count), desc='Epoch %s' % e):
            if dev_count > 1:
                # Multi-device run fetches metric vars and averages across devices.
                ret = train_exe.run(feed=feed_dict, fetch_list=metric.vars)
                ret = [[np.mean(v)] for v in ret]
            else:
                # Single-device run also fetches logits/labels so a train-set
                # accuracy can be computed at epoch end.
                ret = train_exe.run(
                    train_prog,
                    feed=feed_dict,
                    fetch_list=[model.loss, model.logits, model.labels]
                    #fetch_list=metric.vars
                )
                scores.append(ret[1])
                labels.append(ret[2])
                ret = [ret[0]]
            ret = metric.parse(ret)
            if global_step % train_log_step == 0:
                for key, value in ret.items():
                    writer.add_scalar(
                        'train_' + key, value, global_step=global_step)
            ret_sum_loss += ret['loss']
            per_step += 1
            global_step += 1
            # Mid-epoch evaluation + best-model checkpointing.
            if global_step % eval_step == 0:
                eval_ret = evaluate(model, exe, valid_ds, valid_prog, 1,
                                    evaluator, "valid", full_batch)
                test_eval_ret = evaluate(model, exe, test_ds, valid_prog, 1,
                                         evaluator, "test", full_batch)
                eval_ret.update(test_eval_ret)
                sys.stderr.write(json.dumps(eval_ret, indent=4) + "\n")
                for key, value in eval_ret.items():
                    writer.add_scalar(key, value, global_step=global_step)
                if eval_ret["valid_acc"] > best_model:
                    F.io.save_persistables(
                        exe,
                        os.path.join(output_path, "checkpoint"), train_prog)
                    eval_ret["epoch"] = e
                    #eval_ret["step"] = global_step
                    with open(os.path.join(output_path, "best.txt"), "w") as f:
                        f.write(json.dumps(eval_ret, indent=2) + '\n')
                    best_model = eval_ret["valid_acc"]
        # NOTE: scores/labels are only filled on the single-device path above;
        # presumably dev_count == 1 is assumed here — TODO confirm for multi-device.
        scores = np.vstack(scores)
        labels = np.vstack(labels)
        ret = evaluator.eval(scores, labels, "train")
        sys.stderr.write(json.dumps(ret, indent=4) + "\n")
        #print(json.dumps(ret, indent=4) + "\n")
        # Epoch End
        sys.stderr.write("epoch:{}, average loss {}\n".format(e, ret_sum_loss /
                                                              per_step))
        eval_ret = evaluate(model, exe, valid_ds, valid_prog, 1, evaluator,
                            "valid", full_batch)
        test_eval_ret = evaluate(model, exe, test_ds, valid_prog, 1, evaluator,
                                 "test", full_batch)
        eval_ret.update(test_eval_ret)
        sys.stderr.write(json.dumps(eval_ret, indent=4) + "\n")
        for key, value in eval_ret.items():
            writer.add_scalar(key, value, global_step=global_step)
        if eval_ret["valid_acc"] > best_model:
            F.io.save_persistables(exe,
                                   os.path.join(output_path, "checkpoint"),
                                   train_prog)
            #eval_ret["step"] = global_step
            eval_ret["epoch"] = e
            with open(os.path.join(output_path, "best.txt"), "w") as f:
                f.write(json.dumps(eval_ret, indent=2) + '\n')
            best_model = eval_ret["valid_acc"]
    writer.close()
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ListWebAppBackupConfigurationResult',
'AwaitableListWebAppBackupConfigurationResult',
'list_web_app_backup_configuration',
'list_web_app_backup_configuration_output',
]
@pulumi.output_type
class ListWebAppBackupConfigurationResult:
    """
    Description of a backup which will be performed.
    """
    # NOTE: auto-generated by pulumi (see file header) — regenerate rather
    # than editing by hand.  ``METHOD_NAME`` is the generated accessor for
    # the ``databases`` field.
    def __init__(__self__, backup_name=None, backup_schedule=None, METHOD_NAME=None, enabled=None, id=None, kind=None, name=None, storage_account_url=None, type=None):
        if backup_name and not isinstance(backup_name, str):
            raise TypeError("Expected argument 'backup_name' to be a str")
        pulumi.set(__self__, "backup_name", backup_name)
        if backup_schedule and not isinstance(backup_schedule, dict):
            raise TypeError("Expected argument 'backup_schedule' to be a dict")
        pulumi.set(__self__, "backup_schedule", backup_schedule)
        if METHOD_NAME and not isinstance(METHOD_NAME, list):
            raise TypeError("Expected argument 'databases' to be a list")
        pulumi.set(__self__, "databases", METHOD_NAME)
        if enabled and not isinstance(enabled, bool):
            raise TypeError("Expected argument 'enabled' to be a bool")
        pulumi.set(__self__, "enabled", enabled)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if storage_account_url and not isinstance(storage_account_url, str):
            raise TypeError("Expected argument 'storage_account_url' to be a str")
        pulumi.set(__self__, "storage_account_url", storage_account_url)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="backupName")
    def backup_name(self) -> Optional[str]:
        """
        Name of the backup.
        """
        return pulumi.get(self, "backup_name")
    @property
    @pulumi.getter(name="backupSchedule")
    def backup_schedule(self) -> Optional['outputs.BackupScheduleResponse']:
        """
        Schedule for the backup if it is executed periodically.
        """
        return pulumi.get(self, "backup_schedule")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[Sequence['outputs.DatabaseBackupSettingResponse']]:
        """
        Databases included in the backup.
        """
        return pulumi.get(self, "databases")
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        """
        True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="storageAccountUrl")
    def storage_account_url(self) -> str:
        """
        SAS URL to the container.
        """
        return pulumi.get(self, "storage_account_url")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableListWebAppBackupConfigurationResult(ListWebAppBackupConfigurationResult):
    # Awaitable wrapper generated by pulumi: ``__await__`` immediately
    # resolves to a plain result object (the ``if False: yield`` makes this
    # a generator without ever yielding).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListWebAppBackupConfigurationResult(
            backup_name=self.backup_name,
            backup_schedule=self.backup_schedule,
            METHOD_NAME=self.METHOD_NAME,
            enabled=self.enabled,
            id=self.id,
            kind=self.kind,
            name=self.name,
            storage_account_url=self.storage_account_url,
            type=self.type)
def list_web_app_backup_configuration(name: Optional[str] = None,
                                      resource_group_name: Optional[str] = None,
                                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppBackupConfigurationResult:
    """
    Description for Gets the backup configuration of an app.
    Azure REST API version: 2022-09-01.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    # Merge caller-supplied invoke options onto the defaults, then perform
    # the synchronous provider invoke.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:web:listWebAppBackupConfiguration', __args__, opts=opts, typ=ListWebAppBackupConfigurationResult).value
    return AwaitableListWebAppBackupConfigurationResult(
        backup_name=pulumi.get(__ret__, 'backup_name'),
        backup_schedule=pulumi.get(__ret__, 'backup_schedule'),
        METHOD_NAME=pulumi.get(__ret__, 'databases'),
        enabled=pulumi.get(__ret__, 'enabled'),
        id=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        storage_account_url=pulumi.get(__ret__, 'storage_account_url'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(list_web_app_backup_configuration)
def list_web_app_backup_configuration_output(name: Optional[pulumi.Input[str]] = None,
                                             resource_group_name: Optional[pulumi.Input[str]] = None,
                                             opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListWebAppBackupConfigurationResult]:
    """
    Description for Gets the backup configuration of an app.
    Azure REST API version: 2022-09-01.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    # The decorator lifts the plain invoke above into an Output-returning
    # variant; the body is intentionally empty.
    ...
"""
:codeauthor: Nicole Thomas <nicole@saltstack.com>
"""
import os
import pytest
import yaml
import salt.utils.cloud
import salt.utils.files
import salt.utils.yaml
from tests.integration.cloud.helpers.cloud_test_base import CloudTest
from tests.support import win_installer
from tests.support.runtests import RUNTIME_VARS
# Both winrm and smb support are required for the winrm-based tests below.
HAS_WINRM = salt.utils.cloud.HAS_WINRM and salt.utils.cloud.HAS_SMB
# This test needs a longer timeout than other cloud tests
TIMEOUT = 1200
class EC2Test(CloudTest):
    """
    Integration tests for the EC2 cloud provider in Salt-Cloud.

    These tests create (and destroy) real EC2 instances, so they depend on
    valid provider credentials and can take several minutes each.
    """
    PROVIDER = "ec2"
    REQUIRED_PROVIDER_CONFIG_ITEMS = ("id", "key", "keyname", "private_key", "location")
    @staticmethod
    def __fetch_installer():
        # Determine the downloaded installer name by searching the files
        # directory for the first file that looks like an installer.
        for path, dirs, files in os.walk(RUNTIME_VARS.FILES):
            for file in files:
                if file.startswith(win_installer.PREFIX):
                    return file
        # If the installer wasn't found in the previous steps, download the latest Windows installer executable
        name = win_installer.latest_installer_name()
        path = os.path.join(RUNTIME_VARS.FILES, name)
        with salt.utils.files.fopen(path, "wb") as fp:
            win_installer.download_and_verify(fp, name)
        return name
    @property
    def installer(self):
        """
        Make sure the testing environment has a Windows installer executable.
        """
        # Lazily fetched and cached on first access.
        if not hasattr(self, "_installer"):
            self._installer = self.__fetch_installer()
        return self._installer
    def setUp(self):
        """
        Sets up the test requirements
        """
        # Either a security group or a subnet id must be configured for the
        # provider; skip the whole test otherwise.
        group_or_subnet = self.provider_config.get("securitygroup")
        if not group_or_subnet:
            group_or_subnet = self.provider_config.get("subnetid")
        if not group_or_subnet:
            self.skipTest(
                "securitygroup or subnetid missing for {} config".format(self.PROVIDER)
            )
        super().setUp()
    def override_profile_config(self, name, data):
        # Merge ``data`` into the named profile of the temporary ec2.conf.
        conf_path = os.path.join(
            RUNTIME_VARS.TMP_CONF_DIR, "cloud.profiles.d", "ec2.conf"
        )
        with salt.utils.files.fopen(conf_path, "r") as fp:
            conf = yaml.safe_load(fp)
        conf[name].update(data)
        with salt.utils.files.fopen(conf_path, "w") as fp:
            salt.utils.yaml.safe_dump(conf, fp)
    def copy_file(self, name):
        """
        Copy a file from tests/integration/files to a test's temporary
        configuration directory. The path to the file which is created will be
        returned.
        """
        src = os.path.join(RUNTIME_VARS.FILES, name)
        dst = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, name)
        with salt.utils.files.fopen(src, "rb") as sfp:
            with salt.utils.files.fopen(dst, "wb") as dfp:
                dfp.write(sfp.read())
        return dst
    def _test_instance(self, profile, debug):
        """
        Tests creating and deleting an instance on EC2 (classic)
        """
        # create the instance
        cmd = ["-p", profile]
        if debug:
            cmd.extend(["-l", "debug"])
        cmd.append(self.instance_name)
        ret_val = self.run_cloud(" ".join(cmd), timeout=TIMEOUT)
        # check if instance returned with salt installed
        self.assertInstanceExists(ret_val)
        # The returned listing must include an ipv6Address entry.
        ipv6Address_present = False
        for each in ret_val:
            if "ipv6Address:" in each:
                ipv6Address_present = True
        assert ipv6Address_present
        self.assertDestroyInstance()
    def test_instance_rename(self):
        """
        Tests creating and renaming an instance on EC2 (classic)
        """
        # create the instance
        ret_val = self.run_cloud(
            "-p ec2-test {} --no-deploy".format(self.instance_name), timeout=TIMEOUT
        )
        # check if instance returned
        self.assertInstanceExists(ret_val)
        changed_name = self.instance_name + "-changed"
        rename_result = self.run_cloud(
            "-a rename {} newname={} --assume-yes".format(
                self.instance_name, changed_name
            ),
            timeout=TIMEOUT,
        )
        # The old name must be gone and the new name must exist.
        self.assertFalse(
            self._instance_exists(),
            "Instance wasn't renamed: |\n{}".format(rename_result),
        )
        self.assertInstanceExists(instance_name=changed_name)
        self.assertDestroyInstance(changed_name)
    def test_instance(self):
        """
        Tests creating and deleting an instance on EC2 (classic)
        """
        self._test_instance("ec2-test", debug=False)
    def test_win2012r2_psexec(self):
        """
        Tests creating and deleting a Windows 2012r2 instance on EC2 using
        psexec (classic)
        """
        # TODO: psexec calls hang and the test fails by timing out. The same
        # same calls succeed when run outside of the test environment.
        # FIXME? Does this override need to be undone at the end of the test?
        self.override_profile_config(
            "ec2-win2012r2-test",
            {
                "use_winrm": False,
                "userdata_file": self.copy_file("windows-firewall-winexe.ps1"),
                "win_installer": self.copy_file(self.installer),
            },
        )
        self._test_instance("ec2-win2012r2-test", debug=True)
    @pytest.mark.skipif(
        not HAS_WINRM, reason="Skip when winrm dependencies are missing"
    )
    def test_win2012r2_winrm(self):
        """
        Tests creating and deleting a Windows 2012r2 instance on EC2 using
        winrm (classic)
        """
        self.override_profile_config(
            "ec2-win2012r2-test",
            {
                "userdata_file": self.copy_file("windows-firewall.ps1"),
                "win_installer": self.copy_file(self.installer),
                "winrm_ssl_verify": False,
                "use_winrm": True,
            },
        )
        self._test_instance("ec2-win2012r2-test", debug=True)
    def test_win2016_psexec(self):
        """
        Tests creating and deleting a Windows 2016 instance on EC2 using psexec
        (classic)
        """
        # TODO: winexe calls hang and the test fails by timing out. The
        # same calls succeed when run outside of the test environment.
        self.override_profile_config(
            "ec2-win2016-test",
            {
                "use_winrm": False,
                "userdata_file": self.copy_file("windows-firewall-winexe.ps1"),
                "win_installer": self.copy_file(self.installer),
            },
        )
        self._test_instance("ec2-win2016-test", debug=True)
    @pytest.mark.skipif(
        not HAS_WINRM, reason="Skip when winrm dependencies are missing"
    )
    def METHOD_NAME(self):
        """
        Tests creating and deleting a Windows 2016 instance on EC2 using winrm
        (classic)
        """
        self.override_profile_config(
            "ec2-win2016-test",
            {
                "userdata_file": self.copy_file("windows-firewall.ps1"),
                "win_installer": self.copy_file(self.installer),
                "winrm_ssl_verify": False,
                "use_winrm": True,
            },
        )
        self._test_instance("ec2-win2016-test", debug=True)
import numpy as np
import os
import csv
import logging
from rsciio._docstrings import FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC
_logger = logging.getLogger(__name__)
# At some point, if there is another reader which also uses a csv file, it
# will be necessary to mention the other reader in this message (and to add
# an argument in the load function to specify the correct reader)
invalid_file_error = (
    "The csv reader can't import the file, please"
    " make sure, that this is a valid Impulse log file."
)
# NOTE: this constant was previously declared with ``{ }``, which made it a
# one-element *set*, so ``IOError(invalid_filenaming_error)`` carried a set
# instead of a message string.  Parentheses make it a plain str.
invalid_filenaming_error = (
    "The filename does not match Impulse naming, please"
    " make sure that the filenames for the logfile and metadata file are unchanged."
)
def file_reader(filename, lazy=False):
    """
    Read a DENSsolutions Impulse logfile.
    Parameters
    ----------
    %s
    %s
    %s
    """
    if lazy is not False:
        raise NotImplementedError("Lazy loading is not supported.")
    csv_file = ImpulseCSV(filename)
    return _impulseCSV_log_reader(csv_file)
# The three %s placeholders in the docstring above are filled in from the
# shared rsciio docstring snippets here; keep placeholder count and order in
# sync with this tuple.
file_reader.__doc__ %= (FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC)
def _impulseCSV_log_reader(csv_file):
csvs = []
for key in csv_file.logged_quantity_name_list:
csvs.append(csv_file.get_dictionary(key))
return csvs
class ImpulseCSV:
    """Parser for a DENSsolutions Impulse csv logfile plus its metadata file."""
    def __init__(self, filename):
        self.filename = filename
        self._parse_header()
        self._read_data()
    def _parse_header(self):
        # First line of the csv holds the column names.
        with open(self.filename, "r") as f:
            s = f.readline()
        self.column_names = s.strip().split(",")
        if not self._is_impulse_csv_file():
            raise IOError(invalid_file_error)
        self._read_metadatafile()
        # First two columns are timestamps; the rest are logged quantities.
        self.logged_quantity_name_list = self.column_names[2:]
    def _is_impulse_csv_file(self):
        return "TimeStamp" in self.column_names and len(self.column_names) >= 3
    def get_dictionary(self, quantity):
        # Signal dictionary in the rsciio layout (data/axes/metadata).
        return {
            "data": self._data_dictionary[quantity],
            "axes": self._get_axes(),
            "metadata": self.METHOD_NAME(quantity),
            "original_metadata": {"Impulse_header": self.original_metadata},
        }
    def METHOD_NAME(self, quantity):
        # Build the per-signal metadata block from the parsed metadata file.
        return {
            "General": {
                "original_filename": os.path.split(self.filename)[1],
                "title": "%s" % quantity,
                "date": self.original_metadata["Experiment_date"],
                "time": self.original_metadata["Experiment_time"],
            },
            "Signal": {
                "quantity": self._parse_quantity_units(quantity),
            },
        }
    def _parse_quantity_units(self, quantity):
        # A trailing "(unit)" token, if present, is the unit of the quantity.
        quantity_split = quantity.strip().split(" ")
        if (
            len(quantity_split) > 1
            and quantity_split[-1][0] == "("
            and quantity_split[-1][-1] == ")"
        ):
            return quantity_split[-1].replace("(", "").replace(")", "")
        else:
            return ""
    def _read_data(self):
        # Sanitize column names into valid numpy field names.
        names = [
            name.replace(" ", "_")
            .replace("°C", "C")
            .replace("#", "No")
            .replace("(", "")
            .replace(")", "")
            .replace("/", "_")
            .replace("%", "Perc")
            for name in self.column_names
        ]
        data = np.genfromtxt(
            self.filename,
            delimiter=",",
            dtype=None,
            names=names,
            skip_header=1,
            encoding="latin1",
        )
        self._data_dictionary = dict()
        for i, (name, name_dtype) in enumerate(zip(self.column_names, names)):
            if name == "Experiment time":
                self.time_axis = data[name_dtype]
            elif name == "MixValve":
                # MixValve entries look like "a;b;c"; each component is
                # shifted by +2 and packed into one 3-digit integer —
                # TODO(review): confirm this encoding against Impulse docs.
                mixvalvedatachanged = data[name_dtype]
                for index, item in enumerate(data[name_dtype]):
                    mixvalvedatachanged[index] = (
                        int(int(item.split(";")[0]) + 2) * 100
                        + (int(item.split(";")[1]) + 2) * 10
                        + (int(item.split(";")[2]) + 2)
                    )
                mixvalvedatachangedint = np.array(mixvalvedatachanged, dtype=np.int32)
                self._data_dictionary[name] = mixvalvedatachangedint
            else:
                self._data_dictionary[name] = data[name_dtype]
    def _read_metadatafile(self):
        # Locate the experiment metadata file
        self.original_metadata = {}
        notes = []
        notes_section = False
        if "_Synchronized data" in str(self.filename) or "raw" in str(
            self.filename
        ):  # Check if Impulse filename formatting is intact
            # The metadata file shares the prefix of the logfile name.
            metadata_file = (
                "_".join(str(self.filename).split("_")[:-1]) + "_Metadata.log"
            ).replace("\\", "/")
            if os.path.isfile(metadata_file):
                with open(metadata_file, newline="") as csvfile:
                    metadata_file_reader = csv.reader(csvfile, delimiter=",")
                    for row in metadata_file_reader:
                        # Everything after the "Live notes" row is free text.
                        if notes_section:
                            notes.append(row[0])
                        elif row[0] == "Live notes":
                            notes_section = True
                            notes = [row[1].strip()]
                        else:
                            self.original_metadata[row[0].replace(" ", "_")] = row[
                                1
                            ].strip()
                    self.original_metadata["Notes"] = notes
            else:
                _logger.warning("No metadata file found in folder")
        else:
            raise IOError(invalid_filenaming_error)
    def _get_axes(self):
        # Single linear time axis; scale estimated from the mean sample spacing.
        return [
            {
                "size": self.time_axis.shape[0],
                "index_in_array": 0,
                "name": "Time",
                "scale": np.diff(self.time_axis[1:-1]).mean(),
                "offset": 0,
                "units": "Seconds",
                "navigate": False,
            }
        ]
"""Some utility functions."""
import errno
import os
import platform
import stat
import sys
from contextlib import suppress
from importlib.metadata import version
from pathlib import Path
from types import TracebackType
from typing import Any, Callable, Literal, Optional, TextIO, Tuple, Union, cast
import colorama
from packaging.version import Version
from pydantic import StrictBool
from .types import IntSeq
colorama.init()
class Style:
    """Common color styles."""
    # Each style is a list of colorama codes applied before the text.
    OK: IntSeq = [colorama.Fore.GREEN, colorama.Style.BRIGHT]
    WARNING: IntSeq = [colorama.Fore.YELLOW, colorama.Style.BRIGHT]
    IGNORE: IntSeq = [colorama.Fore.CYAN]
    DANGER: IntSeq = [colorama.Fore.RED, colorama.Style.BRIGHT]
    # RESET restores default color and attributes after styled output.
    RESET: IntSeq = [colorama.Fore.RESET, colorama.Style.RESET_ALL]
# Indentation unit and horizontal rule used by the printf helpers below.
INDENT = " " * 2
HLINE = "-" * 42
# Normalized OS name, or None when the platform is not one of the three
# recognized systems.
OS: Optional[Literal["linux", "macos", "windows"]] = cast(
    Any,
    {
        "Linux": "linux",
        "Darwin": "macos",
        "Windows": "windows",
    }.get(platform.system()),
)
def METHOD_NAME() -> Version:
    """Get closest match for the installed copier version."""
    # Lazy import: importing __version__ at module import time would create a
    # circular import ("cannot import name '__version__' from partially
    # initialized module 'copier'").
    from . import __version__

    if __version__ == "0.0.0":
        # Development placeholder — fall back to the installed distribution
        # metadata, which is sometimes more specific.
        return Version(version("copier"))
    return Version(__version__)
def printf(
    action: str,
    msg: Any = "",
    style: Optional[IntSeq] = None,
    indent: int = 10,
    quiet: Union[bool, StrictBool] = False,
    file_: TextIO = sys.stdout,
) -> Optional[str]:
    """Print string with common format.

    With no ``style`` the formatted line is returned instead of printed;
    with a ``style`` it is printed to ``file_`` and ``None`` is returned.
    """
    if quiet:
        return None  # HACK: Satisfy MyPy
    text = str(msg)
    padded_action = action.rjust(indent, " ")
    if not style:
        return padded_action + text
    pieces = style + [padded_action] + Style.RESET + [INDENT, text]  # type: ignore
    print(*pieces, sep="", file=file_)
    return None  # HACK: Satisfy MyPy
def printf_exception(
    e: Exception, action: str, msg: str = "", indent: int = 0, quiet: bool = False
) -> None:
    """Print exception with common format."""
    if quiet:
        return
    print("", file=sys.stderr)
    printf(action, msg=msg, style=Style.DANGER, indent=indent, file_=sys.stderr)
    # Frame the exception text between two horizontal rules.
    for chunk in (HLINE, e, HLINE):
        print(chunk, file=sys.stderr)
def cast_str_to_bool(value: Any) -> bool:
    """Parse anything to bool.

    Params:
        value:
            Anything to be casted to a bool. Tries to be as smart as possible.

            1. Cast to number. Then: 0 = False; anything else = True.
            1. Find [YAML booleans](https://yaml.org/type/bool.html),
               [YAML nulls](https://yaml.org/type/null.html) or `none` in it
               and use it appropriately.
            1. Cast to boolean using standard python `bool(value)`.
    """
    # 1. Numeric cast: 0/0.0 -> False, anything else -> True.
    with suppress(TypeError, ValueError):
        return bool(float(value))
    # 2. YAML-style boolean / null words.
    with suppress(AttributeError):
        word = value.lower()
        if word in {"y", "yes", "t", "true", "on"}:
            return True
        if word in {"n", "no", "f", "false", "off", "~", "null", "none"}:
            return False
    # 3. Fall back to plain truthiness.
    return bool(value)
def force_str_end(original_str: str, end: str = "\n") -> str:
    """Return ``original_str``, appending ``end`` unless it is already there.

    Params:
        original_str: String that you want to ensure ending.
        end: String that must exist at the end of `original_str`
    """
    return original_str if original_str.endswith(end) else original_str + end
def handle_remove_readonly(
    func: Callable, path: str, exc: Tuple[BaseException, OSError, TracebackType]
) -> None:
    """Handle errors when trying to remove read-only files through `shutil.rmtree`.

    On Windows, `shutil.rmtree` does not handle read-only files very well. This
    handler makes the given file writable and re-executes the removal function;
    any other failure is re-raised unchanged.

    Arguments:
        func: An OS-dependant function used to remove a file.
        path: The path to the file to remove.
        exc: A `sys.exc_info()` object.
    """
    error = exc[1]
    is_removal = func in (os.rmdir, os.remove, os.unlink)
    if is_removal and error.errno == errno.EACCES:
        # Grant rwx to user/group/other (0777), then retry the removal.
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        func(path)
    else:
        raise
def readlink(link: Path) -> Path:
    """A custom version of os.readlink/pathlib.Path.readlink.

    ``pathlib.Path.readlink`` only exists on Python >= 3.9, so older
    interpreters fall back to ``os.readlink``.
    """
    if sys.version_info < (3, 9):
        return Path(os.readlink(link))
    return link.readlink()
import os
import ast
import imp
import yaml
import subprocess
from sonic_py_common import device_info
class Common:
    """Shared helpers for SONiC platform API implementations.

    Provides config-driven value lookup/writing (sysfs, files, registers,
    classes) plus small filesystem and subprocess utilities.

    Fixes in this revision:
    - ``_hex_ver_decode`` used true division (``/``) for ``bit_split``,
      producing a float that was then used as a slice bound — a ``TypeError``
      on Python 3.  Integer division (``//``) restores the intended behavior.
    - ``_sysfs_read``/``_sysfs_write`` leaked file handles on some paths;
      both now use ``with`` blocks.
    """

    DEVICE_PATH = '/usr/share/sonic/device/'
    PMON_PLATFORM_PATH = '/usr/share/sonic/platform/'
    CONFIG_DIR = 'sonic_platform_config'

    # Supported output sources for config-driven reads.
    OUTPUT_SOURCE_IPMI = 'ipmitool'
    OUTPUT_SOURCE_GIVEN_LIST = 'value_list'
    OUTPUT_SOURCE_GIVEN_VALUE = 'value'
    OUTPUT_SOURCE_GIVEN_CLASS = 'class'
    OUTPUT_SOURCE_SYSFS = 'sysfs_value'
    OUTPUT_SOURCE_FUNC = 'function'
    OUTPUT_SOURCE_GIVEN_TXT_FILE = 'txt_file'
    OUTPUT_SOURCE_GIVEN_VER_HEX_FILE = 'hex_version_file'
    OUTPUT_SOURCE_GIVEN_VER_HEX_ADDR = 'hex_version_getreg'

    SET_METHOD_IPMI = 'ipmitool'
    NULL_VAL = 'N/A'
    HOST_CHK_CMD = ["docker"]
    # Config values starting with this prefix are resolved from the main conf.
    REF_KEY = '$ref:'

    def __init__(self, conf=None):
        self._main_conf = conf
        self.platform = None
        self.hwsku = None

    def get_platform(self):
        """Return the platform name, resolving and caching it on first use."""
        (self.platform, self.hwsku) = device_info.get_platform_and_hwsku(
        ) if not self.platform else (self.platform, self.hwsku)
        return self.platform

    def get_hwsku(self):
        """Return the hwsku name, resolving and caching it on first use."""
        (self.platform, self.hwsku) = device_info.get_platform_and_hwsku(
        ) if not self.hwsku else (self.platform, self.hwsku)
        return self.hwsku

    def run_command(self, command):
        """Run ``command`` (a list) and return ``(success, stripped_stdout)``."""
        status = False
        output = ""
        try:
            p = subprocess.Popen(command, universal_newlines=True,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            raw_data, err = p.communicate()
            if p.returncode == 0:
                status, output = True, raw_data.strip()
        except Exception:
            # Best-effort helper: any failure reports (False, "").
            pass
        return status, output

    def _clean_input(self, input, config):
        """Validate/translate ``input`` per ``config``; None when rejected."""
        cleaned_input = input
        # NOTE: 'avaliable_input' spelling is part of the existing config schema.
        ai = config.get('avaliable_input')
        if ai and input not in ai:
            return None
        input_translator = config.get('input_translator')
        if type(input_translator) is dict:
            cleaned_input = input_translator.get(input)
        elif type(input_translator) is str:
            # String translators are format templates evaluated as literals.
            cleaned_input = ast.literal_eval(input_translator.format(input))
        return cleaned_input

    def _clean_output(self, index, output, config):
        """Translate a raw ``output`` value per ``config``."""
        output_translator = config.get('output_translator')
        if type(output_translator) is dict:
            output = output_translator.get(output)
        elif type(output_translator) is str:
            output = ast.literal_eval(output_translator.format(output))
        elif type(output_translator) is list:
            output = ast.literal_eval(output_translator[index].format(output))
        return output

    def _sysfs_read(self, index, config):
        """Read one line from the configured sysfs path; False on I/O error."""
        sysfs_path = config.get('sysfs_path')
        argument = config.get('argument', '')
        if self.REF_KEY in argument:
            argument = self._main_conf[argument.split(":")[1]]
        if type(argument) is list:
            sysfs_path = sysfs_path.format(argument[index])
        try:
            # ``with`` guarantees the handle is closed (the previous version
            # left the file object unreferenced but open).
            with open(sysfs_path) as f:
                content = f.readline().rstrip()
        except IOError as e:
            print("Error: unable to open file: %s" % str(e))
            return False
        return content

    def _sysfs_write(self, index, config, input):
        """Write ``input`` at the configured offset; return (success, output)."""
        sysfs_path = config.get('sysfs_path')
        argument = config.get('argument', '')
        if self.REF_KEY in argument:
            argument = self._main_conf[argument.split(":")[1]]
        if type(argument) is list:
            sysfs_path = sysfs_path.format(argument[index])
        write_offset = int(config.get('write_offset', 0))
        output = ""
        try:
            with open(sysfs_path, "r+") as open_file:
                open_file.seek(write_offset)
                open_file.write(input)
        except IOError as e:
            print("Error: unable to open file: %s" % str(e))
            return False, output
        return True, output

    def _hex_ver_decode(self, hver, num_of_bits, num_of_points):
        """Decode hex string ``hver`` into a dotted version string.

        The value is rendered as ``num_of_bits`` binary digits and split into
        ``num_of_points + 1`` equal-width fields, each emitted as a decimal
        component.
        """
        ver_list = []
        c_bit = 0
        bin_val = bin(int(hver, 16))[2:].zfill(num_of_bits)
        # BUGFIX: must be integer division — a float here made the slice
        # bounds below raise TypeError on Python 3.
        bit_split = num_of_bits // (num_of_points + 1)
        for x in range(0, num_of_points + 1):
            split_bin = bin_val[c_bit:c_bit + bit_split]
            ver_list.append(str(int(split_bin, 2)))
            c_bit += bit_split
        return '.'.join(ver_list)

    def _get_class(self, config):
        """
        Retrieves value of expected attribute
        Returns:
            A value of the attribute of object
        """
        path = config['host_path'] if self.is_host() else config['pmon_path']
        # NOTE: ``imp`` is deprecated (removed in Python 3.12); consider
        # migrating to importlib when the platform baseline allows it.
        module = imp.load_source(config['class'], path)
        class_ = getattr(module, config['class'])
        return class_

    def get_reg(self, path, reg_addr):
        """Select ``reg_addr`` by writing it to ``path``, then read the value."""
        with open(path, 'w') as file:
            file.write(reg_addr + '\n')
        with open(path, 'r') as file:
            output = file.readline().strip()
        return output

    def set_reg(self, path, reg_addr, value):
        """Write ``reg_addr value`` to ``path``."""
        with open(path, 'w') as file:
            file.write("{0} {1}\n".format(reg_addr, value))
        return None

    def read_txt_file(self, path):
        """Return the first line of ``path`` (stripped), or '' on any error."""
        try:
            with open(path, 'r') as f:
                output = f.readline()
            return output.strip('\n')
        except Exception:
            pass
        return ''

    def read_one_line_file(self, file_path):
        """Return the first line of ``file_path`` (stripped), or '' on IOError."""
        try:
            with open(file_path, 'r') as fd:
                data = fd.readline()
            return data.strip()
        except IOError:
            pass
        return ''

    def write_txt_file(self, file_path, value):
        """Write ``value`` to ``file_path``; return True on success."""
        try:
            with open(file_path, 'w') as fd:
                fd.write(str(value))
        except Exception:
            return False
        return True

    def is_host(self):
        """True when running on the host (docker CLI available), else False."""
        try:
            subprocess.call(self.HOST_CHK_CMD, stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
        except FileNotFoundError:
            return False
        return True

    def load_json_file(self, path):
        """
        Retrieves the json object from json file path
        Returns:
            A json object
        """
        # yaml.safe_load also parses plain JSON documents.
        with open(path, 'r') as f:
            json_data = yaml.safe_load(f)
        return json_data

    def get_config_path(self, config_name):
        """
        Retrieves the path to platform api config directory
        Args:
            config_name: A string containing the name of config file.
        Returns:
            A string containing the path to json file
        """
        return os.path.join(self.DEVICE_PATH, self.platform, self.CONFIG_DIR, config_name) if self.is_host() else os.path.join(self.PMON_PLATFORM_PATH, self.CONFIG_DIR, config_name)

    def METHOD_NAME(self, timeout, config, sfp_list):
        """
        Returns a nested dictionary containing all devices which have
        experienced a change at chassis level
        """
        event_class = self._get_class(config)
        return event_class(sfp_list).METHOD_NAME(timeout)
import json
from unittest.mock import patch
import pytest
from django.urls import reverse
from firebase_admin.exceptions import FirebaseError
from rest_framework import status
from rest_framework.test import APIClient
from apps.mobile_app.fcm_relay import FCMRelayThrottler, _get_message_from_request_data, fcm_relay_async
from apps.mobile_app.models import FCMDevice
from apps.mobile_app.tasks.new_alert_group import _get_fcm_message
@pytest.mark.django_db
def test_fcm_relay_disabled(
    settings,
    make_organization_and_user_with_plugin_token,
    make_user_auth_headers,
    make_public_api_token,
):
    """The fcm_relay endpoint returns 404 when the relay feature is disabled."""
    settings.FCM_RELAY_ENABLED = False

    organization, user, _plugin_token = make_organization_and_user_with_plugin_token()
    _, api_token = make_public_api_token(user, organization)

    response = APIClient().post(
        reverse("mobile_app:fcm_relay"), HTTP_AUTHORIZATION=api_token
    )

    assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.django_db
def test_fcm_relay_post(
    settings,
    make_organization_and_user_with_plugin_token,
    make_user_auth_headers,
    make_public_api_token,
):
    """A well-formed relay request succeeds when the relay feature is enabled."""
    settings.FCM_RELAY_ENABLED = True

    organization, user, _plugin_token = make_organization_and_user_with_plugin_token()
    _, api_token = make_public_api_token(user, organization)

    payload = {
        "token": "test_registration_id",
        "data": {},
        "apns": {},
    }
    response = APIClient().post(
        reverse("mobile_app:fcm_relay"),
        data=payload,
        format="json",
        HTTP_AUTHORIZATION=api_token,
    )

    assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_fcm_relay_ratelimit(
settings,
make_organization_and_user_with_plugin_token,
make_user_auth_headers,
make_public_api_token,
):
settings.FCM_RELAY_ENABLED = True
organization, user, token = make_organization_and_user_with_plugin_token()
_, token = make_public_api_token(user, organization)
client = APIClient()
url = reverse("mobile_app:fcm_relay")
data = {
"token": "test_registration_id",
"data": {},
"apns": {},
}
with patch.object(FCMRelayThrottler, "rate", "0/m"):
response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=token)
assert response.status_code == status.HTTP_429_TOO_MANY_REQUESTS
@pytest.mark.django_db
def METHOD_NAME():
    # check that FirebaseError is raised when send_message returns it so Celery task can retry
    # NOTE(review): send_message *returns* the error object rather than raising
    # it (firebase-admin convention); the task presumably converts that return
    # value into a raise so Celery retry can trigger — confirm in fcm_relay_async.
    with patch.object(
        FCMDevice, "send_message", return_value=FirebaseError(code="test_error_code", message="test_error_message")
    ):
        with pytest.raises(FirebaseError):
            fcm_relay_async(token="test_token", data={}, apns={})
def test_get_message_from_request_data():
    """Relayed request fields are mapped onto the firebase Message object."""
    token = "test_token"
    data = {"test_data_key": "test_data_value"}
    apns = {"headers": {"apns-priority": "10"}, "payload": {"aps": {"thread-id": "test_thread_id"}}}
    android = {"priority": "high"}
    message = _get_message_from_request_data(token, data, apns, android)
    assert message.token == "test_token"
    assert message.data == {"test_data_key": "test_data_value"}
    assert message.apns.headers == {"apns-priority": "10"}
    # the JSON key "thread-id" surfaces as the attribute `thread_id`
    assert message.apns.payload.aps.thread_id == "test_thread_id"
    assert message.android.priority == "high"
@pytest.mark.django_db
def test_fcm_relay_serialize_deserialize(
make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
):
organization, user = make_organization_and_user()
device = FCMDevice.objects.create(user=user, registration_id="test_device_id")
alert_receive_channel = make_alert_receive_channel(organization=organization)
alert_group = make_alert_group(alert_receive_channel)
make_alert(alert_group=alert_group, raw_request_data={})
# Imitate sending a message to the FCM relay endpoint
original_message = _get_fcm_message(alert_group, user, device, critical=False)
request_data = json.loads(str(original_message))
# Imitate receiving a message from the FCM relay endpoint
relayed_message = _get_message_from_request_data(
request_data["token"], request_data["data"], request_data["apns"], request_data["android"]
)
# Check that the message is the same after serialization and deserialization
assert json.loads(str(original_message)) == json.loads(str(relayed_message)) |
298,690 | test transform caf msa foyer to beneficiary | from datetime import date
from tempfile import SpooledTemporaryFile
from typing import List
from unittest import TestCase
import pytest
from cdb.caf_msa.parse_infos_foyer_rsa import (
CafBeneficiary,
CafInfoFlux,
CafMsaInfosFoyer,
CdbBeneficiaryInfos,
parse_caf_file,
transform_bool,
transform_cafMsaFoyer_to_beneficiary,
transform_closure_reason,
transform_right_rsa,
transform_suspension_reason,
)
from tests.utils.approvaltests import verify_as_json
async def test_parse_caf_file(flux_mensuel_caf: SpooledTemporaryFile):
parsed = parse_caf_file(flux_mensuel_caf)
metadata = next(parsed)
assert isinstance(metadata, CafInfoFlux)
assert metadata.date == date(2022, 3, 5)
assert metadata.type == "M"
foyers: List[CafMsaInfosFoyer] = []
for foyer in parsed:
assert isinstance(foyer, CafMsaInfosFoyer)
foyers.append(foyer)
assert len(foyers) == 2
verify_as_json([foyer.json() for foyer in foyers])
rsa_right_test_data = [
("0", "rsa_demande_en_attente"),
("1", "rsa_refuse"),
("2", "rsa_droit_ouvert_versable"),
("3", "rsa_droit_ouvert_et_suspendu"),
("4", "rsa_droit_ouvert_versement_suspendu"),
("5", "rsa_clos"),
("6", "rsa_clos_anterieur"),
]
@pytest.mark.parametrize("a,expected", rsa_right_test_data)
def test_transform_right_rsa(a, expected):
assert transform_right_rsa(a) == expected
class TestTranformRightRsa(TestCase):
def test_transform_right_rsa_raise(self):
self.assertRaises(KeyError, transform_right_rsa, "toto")
rsa_suspension_test_data = [
("01", "caf_ressources_trop_elevees"),
("02", "caf_moins_25_sans_personne_charge"),
("03", "caf_activite_non_conforme"),
("04", "caf_titre_sejour_invalid"),
("05", "caf_rsa_inferieur_seuil"),
("06", "caf_declaration_ressource_non_fournie"),
("09", "caf_residence_non_conforme"),
("19", "caf_pas_isolement"),
("31", "caf_prestation_exclue"),
("34", "caf_regime_non_conforme"),
("35", "caf_demande_avantage_vieillesse_absent"),
("36", "caf_titre_sejour_absent"),
("44", "caf_hospitalisation"),
("70", "caf_action_non_engagee"),
("78", "caf_surface_ponderee_sup"),
("84", "caf_droit_eteint"),
("85", "caf_pas_allocataire"),
("97", "caf_beneficiaire_aah"),
("AB", "caf_allocataire_absent"),
("CV", "caf_attente_decision_PCG"),
("CZ", "caf_activite_anterieur_insuffisante"),
("DA", "caf_activite_anterieure_absente"),
("DB", "caf_etudiant_remuneration_insuffisante"),
("DC", "caf_activite_anterieure_non_conforme"),
(None, None),
("otot", None),
(" DC ", "caf_activite_anterieure_non_conforme"),
]
@pytest.mark.parametrize("a,expected", rsa_suspension_test_data)
def test_transform_suspension_reason(a, expected):
assert transform_suspension_reason(a) == expected
rsa_closure_test_data = [
("PCG", "caf_decision_pcg"),
("ECH", "caf_echeance"),
("EFF", "caf_annulation_bascule_rmi"),
("MUT", "caf_mutation"),
("RGD", "caf_regroupement"),
("RFD", "caf_radie_fin_droit"),
("RAU", "caf_radie_autre_motif"),
("RST", "caf_radie_option_rsta"),
("RSO", "caf_radie_option_rso"),
(None, None),
("otot", None),
]
@pytest.mark.parametrize("a,expected", rsa_closure_test_data)
def test_transform_closure_reason(a, expected):
assert transform_closure_reason(a) == expected
bool_test_data = [
("0", False),
("1", True),
]
@pytest.mark.parametrize("a,expected", bool_test_data)
def test_transform_bool(a, expected):
assert transform_bool(a) == expected
def METHOD_NAME():
    """A CAF foyer with one beneficiary maps to the expected CdbBeneficiaryInfos."""
    personne = CafBeneficiary(nir="1231231231231", soumis_droit_et_devoir=False)
    foyer = CafMsaInfosFoyer(
        date_cloture_rsa=None,
        motif_cloture_rsa=None,
        matricule="AAAAAAA",
        motif_suspension_versement_rsa=None,
        beneficiaries=[personne],
        sans_domicile_fixe="0",
        etat_droit_rsa="2",
    )
    # per the transform helpers above: etat "2" -> rsa_droit_ouvert_versable,
    # sans_domicile_fixe "0" -> not homeless
    beneficiary = CdbBeneficiaryInfos(
        right_rsa="rsa_droit_ouvert_versable",
        caf_number="AAAAAAA",
        is_homeless=False,
        subject_right_and_duty=False,
        rsa_closure_date=None,
        rsa_closure_reason=None,
        rsa_suspension_reason=None,
    )
    assert transform_cafMsaFoyer_to_beneficiary(personne, foyer) == beneficiary
soumis_droit_et_devoir_test_data = [
("0", False),
(False, False),
("1", True),
(True, True),
(None, None),
]
@pytest.mark.parametrize("a,expected", soumis_droit_et_devoir_test_data)
def test_transform_soumis_droit_et_devoir(a, expected):
beneficiary = CafBeneficiary(nir="1231231231231", soumis_droit_et_devoir=a)
assert beneficiary.soumis_droit_et_devoir == expected |
298,691 | test grad clip | import pytest
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close
import colossalai
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.zero import GeminiDDP, GeminiOptimizer
from colossalai.zero.gemini.chunk import search_chunk_configuration
from tests.components_to_test import run_fwd_bwd
from tests.components_to_test.registry import non_distributed_component_funcs
from tests.test_tensor.common_utils import set_seed
PLACEMENT_CONFIGS = [
{
'placement_policy': 'static',
'shard_param_frac': 0.0,
'offload_optim_frac': 0.0,
'offload_param_frac': 0.0
}, # zero2
{
'placement_policy': 'static',
'shard_param_frac': 0.0,
'offload_optim_frac': 1.0,
'offload_param_frac': 0.0
}, # zero2-offload
{
'placement_policy': 'static',
'shard_param_frac': 0.0,
'offload_optim_frac': 0.5,
'offload_param_frac': 0.0
}, # zero2-offload-half
{
'placement_policy': 'auto'
}
]
def check_param(model: GeminiDDP, torch_model: torch.nn.Module):
    """Assert every parameter of the Gemini model matches the DDP reference model."""
    zero_dict = model.state_dict(only_rank_0=False)
    torch_dict = torch_model.state_dict()
    for key, value in torch_dict.items():
        # key is 'module.model.PARAMETER', so we truncate it
        key = key[7:]
        assert key in zero_dict, "{} not in ZeRO dictionary.".format(key)
        # compare on the reference tensor's device/dtype to avoid spurious
        # mismatches from offloaded (CPU / fp16) Gemini parameters
        temp_zero_value = zero_dict[key].to(device=value.device, dtype=value.dtype)
        # debug_print([0], "max range: ", key, torch.max(torch.abs(value - temp_zero_value)))
        assert_close(value, temp_zero_value, rtol=1e-3, atol=4e-3)
@parameterize('placement_config', PLACEMENT_CONFIGS)
@parameterize('model_name', ['gpt2'])
def exam_grad_clipping(placement_config, model_name: str):
set_seed(1912)
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
torch_model = model_builder().cuda()
amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False, loss_scale=32)
torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-3)
torch_model, torch_optim = convert_to_apex_amp(torch_model, torch_optim, amp_config)
torch_model = DDP(torch_model, device_ids=[dist.get_rank()])
model = model_builder()
for torch_p, p in zip(torch_model.parameters(), model.parameters()):
p.data.copy_(torch_p.data)
world_size = torch.distributed.get_world_size()
config_dict, *_ = search_chunk_configuration(model, search_range_m=1, search_interval=100)
config_dict[world_size]['chunk_size'] = 5000
config_dict[world_size]['keep_gathered'] = False
if placement_config['placement_policy'] != 'cuda':
init_device = torch.device('cpu')
else:
init_device = None
model = GeminiDDP(model,
chunk_config_dict=config_dict,
chunk_init_device=init_device,
pin_memory=True,
**placement_config)
optimizer = HybridAdam(model.parameters(), lr=1e-3)
zero_optim = GeminiOptimizer(optimizer, model, initial_scale=32, clipping_norm=1.0)
model.train()
torch_model.train()
set_seed(dist.get_rank() * 3 + 128)
for i, (data, label) in enumerate(train_dataloader):
if i > 2:
break
data = data.cuda()
label = label.cuda()
zero_optim.zero_grad()
torch_optim.zero_grad()
torch_loss = run_fwd_bwd(torch_model, data, label, criterion, torch_optim)
loss = run_fwd_bwd(model, data, label, criterion, zero_optim)
assert_close(torch_loss, loss)
import apex.amp as apex_amp
torch.nn.utils.clip_grad_norm_(apex_amp.master_params(torch_optim), 1.0)
torch_optim.step()
zero_optim.step()
check_param(model, torch_model)
def run_dist(rank, world_size, port):
config = {}
colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
exam_grad_clipping()
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def METHOD_NAME(world_size):
    """Spawn `world_size` ranks and run the gradient-clipping comparison on each."""
    spawn(run_dist, world_size)
if __name__ == '__main__':
METHOD_NAME(2) |
298,692 | format | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2011-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import contextlib
import copy
import io
import logging
import logging.handlers
import sys
import warnings
from edb.common import debug
from edb.common import term
LOG_LEVELS = {
'S': 'SILENT',
'D': 'DEBUG',
'I': 'INFO',
'E': 'ERROR',
'W': 'WARN',
'WARN': 'WARN',
'ERROR': 'ERROR',
'CRITICAL': 'CRITICAL',
'INFO': 'INFO',
'DEBUG': 'DEBUG',
'SILENT': 'SILENT'
}
class Dark16:
critical = term.Style16(color='white', bgcolor='red', bold=True)
error = term.Style16(color='white', bgcolor='red')
default = term.Style16(color='white', bgcolor='blue')
pid = date = term.Style16(color='black', bold=True)
name = term.Style16(color='black', bold=True)
message = term.Style16()
class Dark256:
critical = term.Style256(color='#c6c6c6', bgcolor='#870000', bold=True)
error = term.Style256(color='#c6c6c6', bgcolor='#870000')
warning = term.Style256(color='#c6c6c6', bgcolor='#5f00d7')
info = term.Style256(color='#c6c6c6', bgcolor='#005f00')
default = term.Style256(color='#c6c6c6', bgcolor='#000087')
pid = date = term.Style256(color='#626262', bold=True)
name = term.Style256(color='#A2A2A2')
message = term.Style16()
class EdgeDBLogFormatter(logging.Formatter):
    """Log formatter that optionally colorizes records for terminal output.

    Colors are applied only when ``term.use_colors()`` reports support;
    the palette (16- vs 256-color) is chosen from ``term.max_colors()``.
    """

    default_time_format = '%Y-%m-%dT%H:%M:%S'
    default_msec_format = '%s.%03d'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__styles = None
        self._colorize = term.use_colors()
        if self._colorize:
            self._init_styles()

    def _init_styles(self):
        # Pick a palette matching the terminal's color depth (idempotent).
        if not self.__styles:
            if term.max_colors() >= 255:
                self.__styles = Dark256()
            else:
                self.__styles = Dark16()

    def formatTime(self, record, datefmt=None):
        # Delegate to the stdlib, then wrap the result in the date style.
        time = super().formatTime(record, datefmt=datefmt)
        if self._colorize:
            time = self.__styles.date.apply(time)
        return time

    def formatException(self, ei):
        # Render the traceback via sys.excepthook (which may be a customized
        # hook) and capture whatever it prints to stdout.
        sio = io.StringIO()
        with contextlib.redirect_stdout(sio):
            sys.excepthook(*ei)
        s = sio.getvalue()
        sio.close()
        # strip a single trailing newline, if any
        if s[-1:] == "\n":
            s = s[:-1]
        return s

    def METHOD_NAME(self, record):
        # Colorize on a *copy* so the styled attributes do not leak into
        # other handlers' views of the same record.
        if self._colorize:
            record = copy.copy(record)
            level = record.levelname
            # Levels without a dedicated style (e.g. Dark16 has no
            # 'warning'/'info') fall back to the default style.
            level_style = getattr(self.__styles, level.lower(),
                                  self.__styles.default)
            record.levelname = level_style.apply(level)
            record.process = self.__styles.pid.apply(str(record.process))
            # NOTE(review): Formatter.format() recomputes record.message via
            # getMessage(), which may discard this styling — confirm whether
            # the message style is intended to survive (it is a no-op style
            # in both palettes today).
            record.message = self.__styles.message.apply(record.getMessage())
            record.name = self.__styles.name.apply(record.name)
        return super().METHOD_NAME(record)
class EdgeDBLogHandler(logging.StreamHandler):
    """Stream handler pre-wired with the EdgeDB colorizing formatter."""

    # Log line layout: "<LEVEL> <pid> <timestamp> <logger name>: <message>"
    _LINE_FORMAT = '{levelname} {process} {asctime} {name}: {message}'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setFormatter(EdgeDBLogFormatter(self._LINE_FORMAT, style='{'))
class EdgeDBLogger(logging.Logger):
    """Logger whose *extra* mapping may override ANY record attribute.

    The stdlib ``Logger.makeRecord`` refuses to overwrite existing record
    attributes from *extra*; this subclass deliberately allows it.
    """

    def makeRecord(
        self,
        name,
        level,
        fn,
        lno,
        msg,
        args,
        exc_info,
        func=None,
        extra=None,
        sinfo=None,
    ):
        record_factory = logging.getLogRecordFactory()
        record = record_factory(
            name, level, fn, lno, msg, args, exc_info, func, sinfo
        )
        if extra is not None:
            # Overwrite unconditionally — including built-in attributes.
            record.__dict__.update(extra)
        return record
IGNORE_DEPRECATIONS_IN = {
'graphql',
'promise',
}
def early_setup():
logging.setLoggerClass(EdgeDBLogger)
def setup_logging(log_level, log_destination):
    """Configure the root logger.

    Args:
        log_level: level name or single-letter alias accepted by
            ``LOG_LEVELS`` (case-insensitive), e.g. ``'d'``, ``'INFO'``,
            ``'silent'``.
        log_destination: ``'syslog'``, ``'stderr'``, or a log file path.

    Raises:
        RuntimeError: if *log_level* is not a recognized level name.
    """
    log_level = log_level.upper()
    try:
        log_level = LOG_LEVELS[log_level]
    except KeyError:
        raise RuntimeError('Invalid logging level {!r}'.METHOD_NAME(log_level))

    if log_level == 'SILENT':
        # Fully disable logging; no handler is installed in this mode.
        logger = logging.getLogger()
        logger.disabled = True
        logger.setLevel(logging.CRITICAL)
        return

    if log_destination == 'syslog':
        fmt = logging.Formatter(
            '{processName}[{process}]: {name}: {message}',
            style='{')
        handler = logging.handlers.SysLogHandler(
            '/dev/log',
            facility=logging.handlers.SysLogHandler.LOG_DAEMON)
        handler.setFormatter(fmt)
    elif log_destination == 'stderr':
        # Colorized terminal output (formatter is wired in by the handler).
        handler = EdgeDBLogHandler()
    else:
        # Any other destination is treated as a file path.
        fmt = logging.Formatter(
            '{levelname} {process} {asctime} {name}: {message}',
            style='{')
        handler = logging.FileHandler(log_destination)
        handler.setFormatter(fmt)

    # NOTE(review): logging._checkLevel is private stdlib API; a public
    # name->int mapping (e.g. logging.getLevelName) would avoid the coupling.
    log_level = logging._checkLevel(log_level)

    logger = logging.getLogger()
    logger.setLevel(log_level)
    logger.addHandler(handler)

    # Channel warnings into logging system
    logging.captureWarnings(True)

    # Show DeprecationWarnings by default ...
    warnings.simplefilter('default', category=DeprecationWarning)

    # ... except for some third-party modules.
    for ignored_module in IGNORE_DEPRECATIONS_IN:
        warnings.filterwarnings('ignore', category=DeprecationWarning,
                                module=ignored_module)

    if not debug.flags.log_metrics:
        log_metrics = logging.getLogger('edb.server.metrics')
        log_metrics.setLevel(logging.ERROR)
298,693 | is time like | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: aeon developers, BSD-3-Clause License (see LICENSE file)
"""Validation and checking functions for time series."""
__all__ = [
"is_int",
"is_float",
"is_timedelta",
"is_date_offset",
"is_timedelta_or_date_offset",
"check_n_jobs",
"check_window_length",
"get_n_cases",
"get_type",
"equal_length",
"is_equal_length",
"has_missing",
"is_univariate",
]
__author__ = ["mloning", "Taiwo Owoseni", "khrapovs", "TonyBagnall"]
import os
from datetime import timedelta
from typing import Union
import numpy as np
import pandas as pd
from aeon.utils.validation.collection import (
get_n_cases,
get_type,
has_missing,
is_equal_length,
is_univariate,
)
ACCEPTED_DATETIME_TYPES = np.datetime64, pd.Timestamp
ACCEPTED_TIMEDELTA_TYPES = pd.Timedelta, timedelta, np.timedelta64
ACCEPTED_DATEOFFSET_TYPES = pd.DateOffset
ACCEPTED_WINDOW_LENGTH_TYPES = Union[
int, float, Union[ACCEPTED_TIMEDELTA_TYPES], Union[ACCEPTED_DATEOFFSET_TYPES]
]
NON_FLOAT_WINDOW_LENGTH_TYPES = Union[
int, Union[ACCEPTED_TIMEDELTA_TYPES], Union[ACCEPTED_DATEOFFSET_TYPES]
]
def is_array(x) -> bool:
    """Check if x is either a list or np.ndarray."""
    # tuples and other sequence types intentionally do not count
    return isinstance(x, list) or isinstance(x, np.ndarray)
def is_int(x) -> bool:
    """Check if x is of integer type, but not boolean."""
    # bool subclasses int, and np.timedelta64 sits under np.integer in the
    # NumPy scalar hierarchy, so both must be ruled out explicitly.
    if isinstance(x, (bool, np.timedelta64)):
        return False
    return isinstance(x, (int, np.integer))
def is_float(x) -> bool:
    """Check if x is of float type (builtin float or any NumPy floating)."""
    return isinstance(x, float) or isinstance(x, np.floating)
def is_timedelta(x) -> bool:
"""Check if x is of timedelta type."""
return isinstance(x, ACCEPTED_TIMEDELTA_TYPES)
def is_datetime(x) -> bool:
"""Check if x is of datetime type."""
return isinstance(x, ACCEPTED_DATETIME_TYPES)
def is_date_offset(x) -> bool:
"""Check if x is of pd.DateOffset type."""
return isinstance(x, ACCEPTED_DATEOFFSET_TYPES)
def is_timedelta_or_date_offset(x) -> bool:
"""Check if x is of timedelta or pd.DateOffset type."""
return is_timedelta(x=x) or is_date_offset(x=x)
def array_is_int(x) -> bool:
"""Check if array is of integer type."""
return all([is_int(value) for value in x])
def array_is_datetime64(x) -> bool:
"""Check if array is of np.datetime64 type."""
return all([is_datetime(value) for value in x])
def array_is_timedelta_or_date_offset(x) -> bool:
"""Check if array is timedelta or pd.DateOffset type."""
return all([is_timedelta_or_date_offset(value) for value in x])
def is_iterable(x) -> bool:
    """Check if input is iterable.

    Probes the iterator protocol directly, so objects that are iterable only
    via ``__getitem__`` (old-style sequence protocol) also count.
    """
    try:
        iter(x)
        return True
    except TypeError:
        return False
def is_iloc_like(x) -> bool:
    """Check if input is .iloc friendly (an int, or a collection of ints)."""
    return array_is_int(x) if is_iterable(x) else is_int(x)
def METHOD_NAME(x) -> bool:
    """Check if input is time-like (pd.Timedelta, pd.DateOffset, etc.)."""
    # Collections are time-like when every element is; scalars are checked
    # directly with the corresponding scalar predicates.
    if is_iterable(x):
        checks = (array_is_timedelta_or_date_offset, array_is_datetime64)
    else:
        checks = (is_timedelta_or_date_offset, is_datetime)
    return any(check(x) for check in checks)
def all_inputs_are_iloc_like(args: list) -> bool:
"""Check if all inputs in the list are .iloc friendly."""
return all([is_iloc_like(x) if x is not None else True for x in args])
def all_inputs_are_time_like(args: list) -> bool:
    """Check if all inputs in the list are time-like (None entries are ignored)."""
    return all([METHOD_NAME(x) if x is not None else True for x in args])
def check_n_jobs(n_jobs: int) -> int:
    """Check `n_jobs` parameter according to the scikit-learn convention.

    https://scikit-learn.org/stable/glossary.html#term-n_jobs

    Parameters
    ----------
    n_jobs : int or None
        The number of jobs for parallelization.
        If None or 0, 1 is used.
        If negative, (n_cpus + 1 + n_jobs) is used. In such a case, -1 would use all
        available CPUs and -2 would use all but one. If the number of CPUs used would
        fall under 1, 1 is returned instead.

    Returns
    -------
    n_jobs : int
        The number of threads to be used.

    Raises
    ------
    ValueError
        If `n_jobs` is neither None nor an integer.
    """
    if n_jobs is None or n_jobs == 0:
        return 1
    elif not is_int(n_jobs):
        raise ValueError(f"`n_jobs` must be None or an integer, but found: {n_jobs}")
    elif n_jobs < 0:
        # os.cpu_count() may return None when the count is undeterminable;
        # fall back to 1 CPU instead of raising a TypeError on None + int.
        n_cpus = os.cpu_count() or 1
        return max(1, n_cpus + 1 + n_jobs)
    else:
        return n_jobs
def check_window_length(
    window_length: ACCEPTED_WINDOW_LENGTH_TYPES,
    n_timepoints: int = None,
    name: str = "window_length",
) -> NON_FLOAT_WINDOW_LENGTH_TYPES:
    """Validate window length.

    Parameters
    ----------
    window_length: positive int, positive float in (0, 1), positive timedelta,
        positive pd.DateOffset, or None
        The window length:
        - If int, the total number of time points.
        - If float, the fraction of time points relative to `n_timepoints`.
        - If timedelta, length in corresponding time units
        - If pd.DateOffset, length in corresponding time units following calendar rules
    n_timepoints: positive int, optional (default=None)
        The number of time points to which to apply `window_length` when
        passed as a float (fraction). Will be ignored if `window_length` is
        an integer.
    name: str
        Name of argument for error messages.

    Returns
    -------
    window_length: int or timedelta or pd.DateOffset

    Raises
    ------
    ValueError
        If `window_length` (or, for the fractional case, `n_timepoints`)
        is not of an accepted type or value.
    """
    if window_length is None:
        return window_length
    elif is_int(window_length) and window_length >= 0:
        return window_length
    elif is_float(window_length) and 0 < window_length < 1:
        # Fractional window lengths require a valid `n_timepoints` to scale by.
        if not is_int(n_timepoints) or n_timepoints < 2:
            raise ValueError(
                f"`n_timepoints` must be a positive integer, but found:"
                f" {n_timepoints}."
            )
        # Compute fraction relative to `n_timepoints`, rounding up.
        return int(np.ceil(window_length * n_timepoints))
    elif is_timedelta(window_length) and window_length > timedelta(0):
        return window_length
    elif is_date_offset(window_length) and pd.Timestamp(
        0
    ) + window_length > pd.Timestamp(0):
        # A DateOffset is "positive" if adding it moves a timestamp forward.
        return window_length
    else:
        raise ValueError(
            f"`{name}` must be a positive integer >= 0, or "
            f"float in (0, 1) or None, but found: {window_length}."
        )
298,694 | do predict | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pprint import pprint
import paddle
from paddlenlp.ops import FasterPegasus
from paddlenlp.transformers import (
PegasusChineseTokenizer,
PegasusForConditionalGeneration,
)
from paddlenlp.utils.log import logger
def parse_args():
    """Build and parse the command-line arguments for the Pegasus export script.

    Returns:
        argparse.Namespace holding the model path, export directory and
        decoding options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name_or_path",
        default="IDEA-CCNL/Randeng-Pegasus-238M-Summary-Chinese",
        type=str,
        help="The model name to specify the Pegasus to use. ",
    )
    parser.add_argument(
        "--export_output_dir", default="./inference_model", type=str, help="Path to save inference model of Pegasus. "
    )
    parser.add_argument("--topk", default=4, type=int, help="The number of candidate to procedure top_k sampling. ")
    parser.add_argument(
        "--topp", default=1.0, type=float, help="The probability threshold to procedure top_p sampling. "
    )
    parser.add_argument("--max_out_len", default=64, type=int, help="Maximum output length. ")
    parser.add_argument("--min_out_len", default=1, type=int, help="Minimum output length. ")
    parser.add_argument("--num_return_sequence", default=1, type=int, help="The number of returned sequence. ")
    parser.add_argument("--temperature", default=1.0, type=float, help="The temperature to set. ")
    # NOTE(review): both --num_return_sequence (above) and --num_return_sequences
    # are declared; only the plural form is referenced by the export code visible
    # in this file — the singular one looks vestigial. Confirm before removing.
    parser.add_argument("--num_return_sequences", default=1, type=int, help="The number of returned sequences. ")
    parser.add_argument("--use_fp16_decoding", action="store_true", help="Whether to use fp16 decoding to predict. ")
    parser.add_argument(
        "--decoding_strategy",
        default="beam_search",
        choices=["beam_search"],
        type=str,
        help="The main strategy to decode. ",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="The number of candidate to procedure beam search. ")
    parser.add_argument(
        "--diversity_rate", default=0.0, type=float, help="The diversity rate to procedure beam search. "
    )
    parser.add_argument(
        "--length_penalty",
        default=0.0,
        type=float,
        help="The exponential penalty to the sequence length in the beam_search strategy. ",
    )
    args = parser.parse_args()
    return args
def METHOD_NAME(args):
    """Export FasterPegasus as a static-graph inference model.

    Loads the dygraph model and tokenizer, wraps the model with
    FasterPegasus, traces it via ``paddle.jit.to_static`` with the decoding
    options from *args*, and saves the result under ``args.export_output_dir``.
    """
    place = "gpu"
    place = paddle.set_device(place)

    model_name_or_path = args.model_name_or_path
    model = PegasusForConditionalGeneration.from_pretrained(model_name_or_path)
    tokenizer = PegasusChineseTokenizer.from_pretrained(model_name_or_path)

    pegasus = FasterPegasus(model=model, use_fp16_decoding=args.use_fp16_decoding, trans_out=True)
    # Set evaluate mode
    pegasus.eval()

    # Convert dygraph model to static graph model
    pegasus = paddle.jit.to_static(
        pegasus,
        input_spec=[
            # input_ids
            paddle.static.InputSpec(shape=[None, None], dtype="int32"),
            # encoder_output
            None,
            # seq_len
            None,
            # min_length
            args.min_out_len,
            # max_length
            args.max_out_len,
            # num_beams. Used for beam_search.
            args.num_beams,
            # decoding_strategy
            args.decoding_strategy,
            # decoder_start_token_id
            model.decoder_start_token_id,
            # bos_token_id
            tokenizer.bos_token_id,
            # eos_token_id
            tokenizer.eos_token_id,
            # pad_token_id
            tokenizer.pad_token_id,
            # diversity rate. Used for beam search.
            args.diversity_rate,
            # length_penalty
            args.length_penalty,
            # topk
            args.topk,
            # topp
            args.topp,
            # temperature
            args.temperature,
            # num_return_sequences
            args.num_return_sequences,
        ],
    )

    # Save converted static graph model
    paddle.jit.save(pegasus, os.path.join(args.export_output_dir, "pegasus"))
    logger.info("PEGASUS has been saved to {}.".format(args.export_output_dir))
if __name__ == "__main__":
args = parse_args()
pprint(args)
METHOD_NAME(args) |
298,695 | test rgb msk int | """Dataset mask test."""
from affine import Affine
import numpy as np
import pytest
from affine import Affine
import rasterio
from rasterio.enums import Resampling
from rasterio.errors import NodataShadowWarning
from rasterio.crs import CRS
# Setup test arrays
red = np.array([[0, 0, 0],
[0, 1, 1],
[1, 0, 1]]).astype('uint8') * 255
grn = np.array([[0, 0, 0],
[1, 0, 1],
[1, 0, 1]]).astype('uint8') * 255
blu = np.array([[0, 0, 0],
[1, 1, 0],
[1, 0, 1]]).astype('uint8') * 255
# equivalent to alp = red | grn | blu
# valid data anywhere there is at least one R, G or B value
alp = np.array([[0, 0, 0],
[1, 1, 1],
[1, 0, 1]]).astype('uint8') * 255
# mask might be constructed using different tools
# and differ from a strict interpretation of rgb values
msk = np.array([[0, 0, 0],
[1, 1, 1],
[1, 1, 1]]).astype('uint8') * 255
alldata = np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]).astype('uint8') * 255
# boundless window ((1, 4, (1, 4))
alp_shift_lr = np.array([[1, 1, 0],
[0, 1, 0],
[0, 0, 0]]).astype('uint8') * 255
# whole mask resampled to (1, 5, 5) array
resampmask = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 0, 1, 1]]).astype('uint8') * 255
# whole mask resampled to (1, 5, 5) array
resampave = np.array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1]]).astype('uint8') * 255
@pytest.fixture(scope='function')
def tiffs(tmpdir):
_profile = {
'transform': Affine(5.0, 0.0, 0.0, 0.0, -5.0, 0.0),
'crs': CRS({'init': 'epsg:4326'}),
'driver': 'GTiff',
'dtype': 'uint8',
'height': 3,
'width': 3}
# 1. RGB without nodata value
prof = _profile.copy()
prof['count'] = 3
prof['nodata'] = None
with rasterio.open(str(tmpdir.join('rgb_no_ndv.tif')), 'w', **prof) as dst:
dst.write(red, 1)
dst.write(grn, 2)
dst.write(blu, 3)
# 2. RGB with nodata value
prof = _profile.copy()
prof['count'] = 3
prof['nodata'] = 0
with rasterio.open(str(tmpdir.join('rgb_ndv.tif')), 'w', **prof) as dst:
dst.write(red, 1)
dst.write(grn, 2)
dst.write(blu, 3)
# 3. RGBA without nodata value
prof = _profile.copy()
prof['count'] = 4
prof['nodata'] = None
with rasterio.open(str(tmpdir.join('rgba_no_ndv.tif')), 'w', **prof) as dst:
dst.write(red, 1)
dst.write(grn, 2)
dst.write(blu, 3)
dst.write(alp, 4)
# 4. RGBA with nodata value
prof = _profile.copy()
prof['count'] = 4
prof['nodata'] = 0
with rasterio.open(str(tmpdir.join('rgba_ndv.tif')), 'w', **prof) as dst:
dst.write(red, 1)
dst.write(grn, 2)
dst.write(blu, 3)
dst.write(alp, 4)
# 5. RGB with msk
prof = _profile.copy()
prof['count'] = 3
with rasterio.open(str(tmpdir.join('rgb_msk.tif')), 'w', **prof) as dst:
dst.write(red, 1)
dst.write(grn, 2)
dst.write(blu, 3)
dst.write_mask(msk)
# 6. RGB with msk (internal)
prof = _profile.copy()
prof['count'] = 3
with rasterio.Env(GDAL_TIFF_INTERNAL_MASK=True):
with rasterio.open(str(tmpdir.join('rgb_msk_internal.tif')),
'w', **prof) as dst:
dst.write(red, 1)
dst.write(grn, 2)
dst.write(blu, 3)
dst.write_mask(msk)
# 7. RGBA with msk
prof = _profile.copy()
prof['count'] = 4
with rasterio.open(str(tmpdir.join('rgba_msk.tif')), 'w', **prof) as dst:
dst.write(red, 1)
dst.write(grn, 2)
dst.write(blu, 3)
dst.write(alp, 4)
dst.write_mask(msk)
return tmpdir
def test_no_ndv(tiffs):
with rasterio.open(str(tiffs.join('rgb_no_ndv.tif'))) as src:
assert np.array_equal(src.dataset_mask(), alldata)
def test_rgb_ndv(tiffs):
with rasterio.open(str(tiffs.join('rgb_ndv.tif'))) as src:
res = src.dataset_mask()
assert res.dtype.name == "uint8"
assert np.array_equal(src.dataset_mask(), alp)
def test_rgba_no_ndv(tiffs):
with rasterio.open(str(tiffs.join('rgba_no_ndv.tif'))) as src:
assert np.array_equal(src.dataset_mask(), alp)
def test_rgba_ndv(tiffs):
with rasterio.open(str(tiffs.join('rgba_ndv.tif'))) as src:
with pytest.warns(NodataShadowWarning):
res = src.dataset_mask()
assert np.array_equal(res, alp)
def test_rgb_msk(tiffs):
with rasterio.open(str(tiffs.join('rgb_msk.tif'))) as src:
assert np.array_equal(src.dataset_mask(), msk)
# each band's mask is also equal
for bmask in src.read_masks():
assert np.array_equal(bmask, msk)
def METHOD_NAME(tiffs):
    """A GDAL-internal mask (GDAL_TIFF_INTERNAL_MASK) reads back like a sidecar mask."""
    with rasterio.open(str(tiffs.join('rgb_msk_internal.tif'))) as src:
        assert np.array_equal(src.dataset_mask(), msk)
def test_rgba_msk(tiffs):
with rasterio.open(str(tiffs.join('rgba_msk.tif'))) as src:
# mask takes precendent over alpha
assert np.array_equal(src.dataset_mask(), msk)
@pytest.mark.parametrize("kwds,expected", [(dict(window=((1, 4), (1, 4)), boundless=True), alp_shift_lr), (dict(out_shape=(1, 5, 5)), resampmask), (dict(out=np.zeros((1, 5, 5), dtype=np.uint8)), resampmask)])
def test_kwargs(tiffs, kwds, expected):
with rasterio.open(str(tiffs.join('rgb_ndv.tif'))) as src:
result = src.dataset_mask(**kwds)
assert np.array_equal(expected, result)
def test_indexes_not_supported(tiffs):
with rasterio.open(str(tiffs.join('rgb_ndv.tif'))) as src:
with pytest.raises(TypeError):
src.dataset_mask(indexes=1)
def test_kwargs_resampling(tiffs):
with rasterio.open(str(tiffs.join('rgb_ndv.tif'))) as src:
other = src.dataset_mask(out_shape=(1, 5, 5), resampling=Resampling.bilinear) != 0
other = other.astype(np.uint8) * 255
assert np.array_equal(resampave, other) |
298,696 | test selected conversation has downloadable files false | import unittest
from collections import namedtuple
from unittest import mock
from PyQt5.QtTest import QSignalSpy
from securedrop_client import state
from tests.helper import app # noqa: F401
Source = namedtuple("Source", ["uuid"])
File = namedtuple("File", ["uuid", "source", "is_downloaded"])
class TestState(unittest.TestCase):
    """Exercise securedrop_client.state.State: conversation selection,
    per-conversation file tracking, download bookkeeping, and emission of
    the selected_conversation_files_changed signal.

    Fixes: the METHOD_NAME placeholder was never collected by pytest/unittest
    (renamed to test_selected_conversation_has_downloadable_files_false), and
    a stray trailing ``|`` on the last line broke parsing.
    """

    def setUp(self):
        """Give every test a fresh, empty State."""
        self.state = state.State()

    def test_selected_conversation_is_unset_by_default(self):
        assert self.state.selected_conversation is None

    def test_selected_conversation_can_be_updated(self):
        self.state.selected_conversation = "0"
        assert self.state.selected_conversation == "0"
        # File identifiers can be of any shape.
        self.state.selected_conversation = 1
        assert self.state.selected_conversation == 1

    def test_selected_conversation_can_be_set_from_an_optional_source_id_and_cleared(self):
        source_id = state.SourceId("some_id")
        self.state.set_selected_conversation_for_source(source_id)
        assert self.state.selected_conversation == state.ConversationId("some_id")
        self.state.clear_selected_conversation()
        assert self.state.selected_conversation is None

    def test_add_file_does_not_duplicate_information(self):
        self.state.add_file(5, 1)
        self.state.add_file(5, 7)
        assert len(self.state.conversation_files(5)) == 2
        self.state.add_file(5, 7)
        assert len(self.state.conversation_files(5)) == 2

    def test_remove_conversation_files_removes_all_conversation_files(self):
        self.state.add_file(7, 3)
        self.state.add_file(7, 1)
        assert len(self.state.conversation_files(7)) == 2
        self.state.remove_conversation_files(7)
        assert len(self.state.conversation_files(7)) == 0

    def test_remove_conversation_files_handles_missing_files_gracefully(self):
        self.state.remove_conversation_files(8)
        assert len(self.state.conversation_files(8)) == 0

    def test_conversation_files_is_empty_by_default(self):
        assert len(self.state.conversation_files(2)) == 0

    def test_conversation_files_returns_the_conversation_files(self):
        self.state.add_file(4, 1)
        self.state.add_file(4, 7)
        self.state.add_file(4, 3)
        assert len(self.state.conversation_files(4)) == 3
        self.state.add_file(4, 8)
        assert len(self.state.conversation_files(4)) == 4

    def test_records_downloads(self):
        some_file_id = state.FileId("X")
        another_file_id = state.FileId("Y")
        self.state.add_file("4", some_file_id)
        self.state.add_file("4", another_file_id)
        files = self.state.conversation_files("4")
        assert len(files) == 2
        assert not files[0].is_downloaded
        assert not files[1].is_downloaded
        self.state.record_file_download(some_file_id)
        assert len(files) == 2
        assert files[0].is_downloaded
        assert not files[1].is_downloaded

    def test_record_downloads_ignores_missing_files(self):
        missing_file_id = state.FileId("missing")
        self.state.record_file_download(missing_file_id)
        assert True

    def test_selected_conversation_files_changed_signal_is_emited_when_meaningful(self):
        signal_emissions = QSignalSpy(self.state.selected_conversation_files_changed)

        # when the selected conversation changed
        self.state.selected_conversation = 1
        assert len(signal_emissions) == 1

        # NOT when a file is added to a conversation that's not the selected one
        self.state.add_file("some_conversation_id", "file_id")
        assert len(signal_emissions) == 1  # the signal wasn't emitted again

        # when a known file was downloaded
        self.state.record_file_download("file_id")
        assert len(signal_emissions) == 2

        # when a file is added to the selected conversation
        self.state.add_file(1, "some_file_id")
        assert len(signal_emissions) == 3

        # NOT when files are removed from a conversation that's not the selected one
        self.state.remove_conversation_files("some_conversation_id")
        assert len(signal_emissions) == 3  # the signal wasn't emitted again

        # when the selected conversation files are removed
        self.state.remove_conversation_files(1)
        assert len(signal_emissions) == 4

    def test_selected_conversation_has_downloadable_files_false_by_default(self):
        assert not self.state.selected_conversation_has_downloadable_files

    def test_selected_conversation_has_downloadable_files_false(self):
        """Nothing is downloadable once every file in the selected conversation
        is downloaded; downloadability returns when any file is not."""
        self.state.selected_conversation = 1
        self.state.add_file(1, "some_file_id")
        self.state.add_file(1, "another_file_id")
        self.state.add_file("conversation that's not selected", "unrelated_file")
        self.state.file("unrelated_file").is_downloaded = False  # to be explicit
        self.state.file("some_file_id").is_downloaded = True
        self.state.file("another_file_id").is_downloaded = True
        assert not self.state.selected_conversation_has_downloadable_files
        self.state.file("some_file_id").is_downloaded = False
        assert self.state.selected_conversation_has_downloadable_files

    def test_gets_initialized_when_created_with_a_database(self):
        source = Source(uuid="id")
        file_1 = File(uuid="one", source=source, is_downloaded=True)
        file_2 = File(uuid="two", source=source, is_downloaded=False)
        database = mock.MagicMock()
        database.get_files = mock.MagicMock(return_value=[file_1, file_2])
        initialized_state = state.State(database)
        assert initialized_state.file(state.FileId("one")).is_downloaded
        assert not initialized_state.file(state.FileId("two")).is_downloaded
        assert len(initialized_state.conversation_files(state.ConversationId("id"))) == 2
298,697 | test period agg default | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Unit tests for the quality assurance
Level classes.
"""
import pytest
from flowmachine.core import make_spatial_unit
from flowmachine.core.errors import InvalidSpatialUnitError
from flowmachine.features import TotalNetworkObjects, AggregateNetworkObjects
def test_tno_at_lon_lat(get_dataframe):
    """
    Regression test for #108. TNO should work at lon-lat level.
    """
    total_objects = TotalNetworkObjects(
        start="2016-01-01",
        stop="2016-01-07",
        network_object=make_spatial_unit("versioned-cell"),
        spatial_unit=make_spatial_unit("lon-lat"),
    )
    df = total_objects.get_dataframe()
    assert df.value.sum() == 330
@pytest.mark.parametrize(
    "stat, expected",
    [
        ("avg", 30.541666666666668),
        ("max", 38),
        ("min", 21),
        ("median", 31.0),
        ("mode", 27),
        ("stddev", 4.096437122848253),
        ("variance", 16.780797101449277),
    ],
)
def test_aggregate_returns_correct_values(stat, expected, get_dataframe):
    """
    AggregateNetworkObjects returns correct values.
    """
    hourly_totals = TotalNetworkObjects(
        start="2016-01-01", stop="2016-12-30", table="calls", total_by="hour"
    )
    aggregated = AggregateNetworkObjects(
        total_network_objects=hourly_totals, statistic=stat
    )
    df = get_dataframe(aggregated)
    # Spot-check the very first value against an independently computed one.
    assert pytest.approx(df.value[0]) == expected
def test_count_returns_correct_values(get_dataframe):
    """
    TotalNetworkObjects returns correct values.
    """
    df = get_dataframe(
        TotalNetworkObjects(
            start="2016-01-01", stop="2016-12-30", table="calls", total_by="hour"
        )
    )
    # Spot-check one value against an independently computed one.
    assert df.value[34] == 31
def test_bad_total_by():
    """Test value errors are raised for bad 'total_by' param"""
    bad_kwargs = dict(
        start="2016-01-01",
        stop="2016-12-30",
        table="calls",
        total_by="BAD_TOTAL_BY",
    )
    with pytest.raises(ValueError):
        TotalNetworkObjects(**bad_kwargs)
@pytest.mark.parametrize(
    "bad_arg, spatial_unit_type",
    [("spatial_unit", "cell"), ("network_object", "lon-lat")],
)
def test_bad_spatial_units(bad_arg, spatial_unit_type):
    """
    Test InvalidSpatialUnitErrors are raised for bad 'network_object' or
    'spatial_unit' params.
    """
    bad_unit = {bad_arg: make_spatial_unit(spatial_unit_type)}
    with pytest.raises(InvalidSpatialUnitError):
        TotalNetworkObjects(
            start="2016-01-01", stop="2016-12-30", table="calls", **bad_unit
        )
def test_bad_aggregate_by():
    """Test that invalid 'aggregate_by' param raises value error."""
    totals = TotalNetworkObjects(
        start="2016-01-01", stop="2016-12-30", table="calls"
    )
    with pytest.raises(ValueError):
        AggregateNetworkObjects(
            total_network_objects=totals, aggregate_by="BAD_AGGREGATE_BY"
        )
def test_bad_statistic():
    """Test that invalid stat for aggregate raises value error."""
    totals = TotalNetworkObjects(
        start="2016-01-01", stop="2016-12-30", table="calls"
    )
    with pytest.raises(ValueError):
        AggregateNetworkObjects(total_network_objects=totals, statistic="BAD STAT")
def test_median_returns_correct_values(get_dataframe):
    """
    features.network.TotalNetworkObjects median aggregate returns correct values.
    """
    totals = TotalNetworkObjects(
        table="calls",
        total_by="hour",
        network_object=make_spatial_unit("versioned-site"),
    )
    aggregated = AggregateNetworkObjects(
        total_network_objects=totals, aggregate_by="day", statistic="median"
    )
    # Spot-check the very first value against an independently computed one.
    first_value = get_dataframe(aggregated).head(1)["value"][0]
    assert first_value == 25
def test_mean_returns_correct_values(get_dataframe):
    """
    features.network.TotalNetworkObjects aggregation returns correct values.
    """
    totals = TotalNetworkObjects(
        start="2016-01-01",
        stop="2016-12-30",
        total_by="hour",
        network_object=make_spatial_unit("versioned-site"),
    )
    aggregated = AggregateNetworkObjects(
        total_network_objects=totals, aggregate_by="day"
    )
    # Spot-check the very first value against an independently computed one.
    first_value = get_dataframe(aggregated).head(1)["value"][0]
    assert first_value == pytest.approx(28.7916666666)
@pytest.mark.parametrize(
    "total_by, aggregate_by_expected",
    [
        ("second", "minute"),
        ("minute", "hour"),
        ("hour", "day"),
        ("day", "month"),
        ("month", "year"),
        ("year", "century"),
    ],
)
def test_period_agg_default(total_by, aggregate_by_expected):
    """Correct aggregation period is deduced.

    Renamed from the METHOD_NAME placeholder (pytest never collected it) and
    removed the stray trailing ``|`` extraction artifact on the final line.
    """
    inst = AggregateNetworkObjects(
        total_network_objects=TotalNetworkObjects(
            start="2016-01-01", stop="2016-12-30", total_by=total_by
        )
    )
    assert inst.aggregate_by == aggregate_by_expected
298,698 | test ipv6 local | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
# TCP socket states as they appear (hex-encoded) in the state column of
# /proc/net/tcp and /proc/net/tcp6; values mirror the kernel's TCP state
# enumeration (include/net/tcp_states.h).
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
    '''
    Get list of socket inodes for process pid.
    '''
    fd_dir = '/proc/%i/fd' % pid
    inodes = []
    for entry in os.listdir(fd_dir):
        link = os.readlink(os.path.join(fd_dir, entry))
        # Socket fds read back as 'socket:[<inode>]'; extract the number.
        if link.startswith('socket:'):
            inodes.append(int(link[8:-1]))
    return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
    '''
    Function to return a list with status of tcp connections at linux systems
    To get pid of all network process running on system, you must run this script
    as superuser
    '''
    with open('/proc/net/' + typ, 'r', encoding='utf8') as f:
        lines = f.readlines()
    result = []
    for line in lines[1:]:  # skip the header row
        fields = _remove_empty(line.split(' '))
        result.append([
            fields[0],                     # tcp id
            _convert_ip_port(fields[1]),   # local address
            _convert_ip_port(fields[2]),   # remote address
            fields[3],                     # connection state
            int(fields[9]),                # inode, used to match the owning pid
        ])
    return result
def get_bind_addrs(pid):
    '''
    Get bind addresses as (host,port) tuples for process pid.
    '''
    inodes = set(get_socket_inodes(pid))
    return [
        conn[1]
        for conn in netstat('tcp') + netstat('tcp6')
        if conn[3] == STATE_LISTEN and conn[4] in inodes
    ]
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
    '''
    Return all interfaces that are up, as (name, IPv4 address) pairs.

    Fixes: array.tostring() was removed in Python 3.9 (tobytes() is the
    replacement); the probe socket is now closed; the local that shadowed
    the ``bytes`` builtin is renamed.
    '''
    is_64bits = sys.maxsize > 2**32
    struct_size = 40 if is_64bits else 32  # sizeof(struct ifreq) differs by ABI
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        max_possible = 8  # initial value
        while True:
            buf_len = max_possible * struct_size
            names = array.array('B', b'\0' * buf_len)
            outbytes = struct.unpack('iL', fcntl.ioctl(
                s.fileno(),
                0x8912,  # SIOCGIFCONF
                struct.pack('iL', buf_len, names.buffer_info()[0])
            ))[0]
            if outbytes == buf_len:
                # Buffer may have been too small to list every interface; retry bigger.
                max_possible *= 2
            else:
                break
    finally:
        s.close()
    namestr = names.tobytes()
    return [(namestr[i:i+16].split(b'\0', 1)[0],
             socket.inet_ntoa(namestr[i+20:i+24]))
            for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
    '''
    Convert string IPv4 or IPv6 address to binary address as returned by
    get_bind_addrs.
    Very naive implementation that certainly doesn't work for all IPv6 variants.
    '''
    if '.' in addr:  # IPv4 dotted quad
        octets = [int(part) for part in addr.split('.')]
    elif ':' in addr:  # IPv6
        sub = [[], []]  # bytes before and after the '::' gap
        x = 0  # 0 while filling the prefix, 1 once '::' has been seen
        components = addr.split(':')
        for i, comp in enumerate(components):
            if comp == '':
                # skip empty component at beginning or end
                if i == 0 or i == (len(components) - 1):
                    continue
                x += 1  # '::' switches us to the suffix
                assert (x < 2)
            else:
                # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert ((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        octets = sub[0] + ([0] * nullbytes) + sub[1]
    else:
        raise ValueError('Could not parse address %s' % addr)
    return hexlify(bytearray(octets)).decode('ascii')
def test_ipv6_local():
    '''
    Check for (local) IPv6 support; returns True if IPv6 localhost is routable.

    Renamed from the METHOD_NAME placeholder, removed the stray trailing ``|``
    artifact, and the probe socket is now closed via a context manager. The
    redundant function-local ``import socket`` is dropped (imported at module
    level).
    '''
    # By using SOCK_DGRAM this will not actually make a connection, but it will
    # fail if there is no route to IPv6 localhost.
    have_ipv6 = True
    try:
        with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as s:
            s.connect(('::1', 0))
    except socket.error:
        have_ipv6 = False
    return have_ipv6
298,699 | terminal | from django.contrib.auth.models import User
from django.db import models
from tom_targets.models import Target
from tom_observations.facility import get_service_class
from tom_common.hooks import run_hook
class ObservationRecord(models.Model):
"""
Class representing an observation in a TOM.
A ObservationRecord corresponds with any set of related exposures at a facility, and is associated with a single
target.
:param target: The ``Target`` with which this object is associated.
:type target: Target
:param facility: The facility at which this observation is taken. Should be the name specified in the corresponding
TOM facility module, if one exists.
:type facility: str
:param parameters: The set of parameters used in the API request made to create the observation
:type parameters: dict
:param status: The current status of the observation. Should be a valid status in the corresponding TOM facility
module, if one exists.
:type status: str
:param scheduled_start: The time at which the observation is scheduled to begin, according to the facility.
:type scheduled_start: datetime
:param scheduled_end: The time at which the observation is scheduled to end, according to the facility.
:type scheduled_end: datetime
:param created: The time at which this object was created.
:type created: datetime
:param modified: The time at which this object was last updated.
:type modified: datetime
"""
target = models.ForeignKey(Target, on_delete=models.CASCADE)
user = models.ForeignKey(User, null=True, default=None, on_delete=models.DO_NOTHING)
facility = models.CharField(max_length=50)
parameters = models.JSONField()
observation_id = models.CharField(max_length=255)
status = models.CharField(max_length=200)
scheduled_start = models.DateTimeField(null=True)
scheduled_end = models.DateTimeField(null=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-created',)
def save(self, *args, **kwargs):
if self.id:
presave_data = ObservationRecord.objects.get(pk=self.id)
super().save(*args, **kwargs)
if self.status != presave_data.status:
run_hook('observation_change_state', self, presave_data.status)
else:
super().save(*args, **kwargs)
run_hook('observation_change_state', self, None)
@property
def METHOD_NAME(self):
facility = get_service_class(self.facility)
return self.status in facility().get_terminal_observing_states()
@property
def failed(self):
facility = get_service_class(self.facility)
return self.status in facility().get_failed_observing_states()
@property
def url(self):
facility = get_service_class(self.facility)
return facility().get_observation_url(self.observation_id)
def update_status(self):
facility = get_service_class(self.facility)
facility().update_observation_status(self.id)
def save_data(self):
facility = get_service_class(self.facility)
facility().save_data_products(self)
def __str__(self):
return '{0} @ {1}'.format(self.target, self.facility)
class ObservationGroup(models.Model):
    """
    Class representing a logical group of observations.
    :param name: The name of the grouping.
    :type name: str
    :param observation_records: Set of ``ObservationRecord`` objects associated with this ``ObservationGroup``
    :type observation_records: ManyToManyField
    :param created: The time at which this ``ObservationGroup`` was created.
    :type created: datetime
    :param modified: The time at which this ``ObservationGroup`` was modified.
    :type modified: datetime
    """
    name = models.CharField(max_length=50)
    observation_records = models.ManyToManyField(ObservationRecord)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    class Meta:
        # Newest groups first; ties broken alphabetically by name.
        ordering = ('-created', 'name',)
    def __str__(self):
        return self.name
class DynamicCadence(models.Model):
    """
    Class representing a dynamic cadence--that is, a cadence that follows a pattern but modifies its behavior
    depending on the result of prior observations.
    :param observation_group: The ``ObservationGroup`` containing the observations that were created by this cadence.
    :type observation_group: ``ObservationGroup``
    :param cadence_strategy: The name of the cadence strategy this cadence is using.
    :type cadence_strategy: str
    :param cadence_parameters: The parameters for this cadence, e.g. cadence period
    :type cadence_parameters: JSON
    :param active: Whether or not this cadence should continue to submit observations
    :type active: boolean
    :param created: The time at which this ``DynamicCadence`` was created.
    :type created: datetime
    :param modified: The time at which this ``DynamicCadence`` was modified.
    :type modified: datetime
    """
    # Deleting the ObservationGroup cascades and deletes this cadence too.
    observation_group = models.ForeignKey(ObservationGroup, null=False, default=None, on_delete=models.CASCADE)
    cadence_strategy = models.CharField(max_length=100, blank=False, default=None,
                                        verbose_name='Cadence strategy used for this DynamicCadence')
    cadence_parameters = models.JSONField(blank=False, null=False, verbose_name='Cadence-specific parameters')
    # Cadences are switched off via this flag rather than deleted.
    active = models.BooleanField(verbose_name='Active',
                                 help_text='''Whether or not this DynamicCadence should
                                              continue to submit observations.''')
    created = models.DateTimeField(auto_now_add=True, help_text='The time which this DynamicCadence was created.')
    modified = models.DateTimeField(auto_now=True, help_text='The time which this DynamicCadence was modified.')
    def __str__(self):
        return f'{self.cadence_strategy} with parameters {self.cadence_parameters}'
class ObservationTemplate(models.Model):
    """
    Class representing an observation template.

    Fix: the final line carried a stray trailing ``|`` (extraction artifact)
    that made the module unparsable.

    :param name: The name of the ``ObservationTemplate``
    :type name: str
    :param facility: The module-specified facility name for which the template is valid
    :type facility: str
    :param parameters: Observing parameters
    :type parameters: dict
    :param created: The time at which this ``ObservationTemplate`` was created.
    :type created: datetime
    :param modified: The time at which this ``ObservationTemplate`` was modified.
    :type modified: datetime
    """
    name = models.CharField(max_length=200)
    facility = models.CharField(max_length=50)
    parameters = models.JSONField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.