| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 4–721) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–57) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–91) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses, 321 values) | visit_date (timestamp[ns], 2016-08-12 09:31:09 – 2023-09-06 10:45:07) | revision_date (timestamp[ns], 2010-09-28 14:01:40 – 2023-09-06 06:22:19) | committer_date (timestamp[ns], 2010-09-28 14:01:40 – 2023-09-06 06:22:19) | github_id (int64, 426 – 681M) | star_events_count (int64, 101 – 243k) | fork_events_count (int64, 0 – 110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[ns], 2012-06-28 18:51:49 – 2023-09-14 21:59:16, nullable) | gha_created_at (timestamp[ns], 2008-02-11 22:55:26 – 2023-08-10 11:14:58, nullable) | gha_language (stringclasses, 147 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 2 values) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 6 – 10.2M) | extension (stringclasses, 115 values) | filename (stringlengths 3–113) | content (stringlengths 6 – 10.2M) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
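The rows below follow the schema summarized above. As a minimal, hedged sketch (the file name `rows.parquet` and the use of pandas are assumptions for illustration, not part of this dump), metadata with these columns could be inspected like so:

import pandas as pd

# Hypothetical export of the rows shown below; column names come from the header above.
df = pd.read_parquet("rows.parquet")

# Example: permissively licensed Python files under 10 KB, sorted by star count.
small_py = df[(df["license_type"] == "permissive")
              & (df["language"] == "Python")
              & (df["length_bytes"] < 10_000)]
print(small_py.sort_values("star_events_count", ascending=False)
              [["repo_name", "path", "length_bytes"]].head())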
1874c920beea17a60c6d77dda6e1f3ae89c47fba
|
bdfd36be0da37a335cabefd41db9df44c31f3b42
|
/scripts/modules/deeplab.py
|
9fd96145838222b3f42b6d0c305b994ecfb4284e
|
[
"BSD-3-Clause",
"HPND",
"CC-BY-NC-SA-4.0"
] |
permissive
|
mapillary/inplace_abn
|
54435a7b688da075e37d36d4c2fc09df43a9983d
|
d7dd3e1f22164083734ad6ac42365a4cc99a5051
|
refs/heads/main
| 2023-08-19T19:46:05.503129
| 2023-01-03T10:31:21
| 2023-01-03T10:31:21
| 111,797,719
| 1,377
| 216
|
BSD-3-Clause
| 2023-07-17T08:13:53
| 2017-11-23T10:47:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
deeplab.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as functional
from inplace_abn import ABN
from models.util import try_index
class DeeplabV3(nn.Module):
def __init__(
self,
in_channels,
out_channels,
hidden_channels=256,
dilations=(12, 24, 36),
norm_act=ABN,
pooling_size=None,
):
super(DeeplabV3, self).__init__()
self.pooling_size = pooling_size
self.map_convs = nn.ModuleList(
[
nn.Conv2d(in_channels, hidden_channels, 1, bias=False),
nn.Conv2d(
in_channels,
hidden_channels,
3,
bias=False,
dilation=dilations[0],
padding=dilations[0],
),
nn.Conv2d(
in_channels,
hidden_channels,
3,
bias=False,
dilation=dilations[1],
padding=dilations[1],
),
nn.Conv2d(
in_channels,
hidden_channels,
3,
bias=False,
dilation=dilations[2],
padding=dilations[2],
),
]
)
self.map_bn = norm_act(hidden_channels * 4)
self.global_pooling_conv = nn.Conv2d(
in_channels, hidden_channels, 1, bias=False
)
self.global_pooling_bn = norm_act(hidden_channels)
self.red_conv = nn.Conv2d(hidden_channels * 4, out_channels, 1, bias=False)
self.pool_red_conv = nn.Conv2d(hidden_channels, out_channels, 1, bias=False)
self.red_bn = norm_act(out_channels)
self.reset_parameters(self.map_bn.activation, self.map_bn.activation_param)
def reset_parameters(self, activation, slope):
gain = nn.init.calculate_gain(activation, slope)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data, gain)
if hasattr(m, "bias") and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, ABN):
if hasattr(m, "weight") and m.weight is not None:
nn.init.constant_(m.weight, 1)
if hasattr(m, "bias") and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
# Map convolutions
out = torch.cat([m(x) for m in self.map_convs], dim=1)
out = self.map_bn(out)
out = self.red_conv(out)
# Global pooling
pool = self._global_pooling(x)
pool = self.global_pooling_conv(pool)
pool = self.global_pooling_bn(pool)
pool = self.pool_red_conv(pool)
if self.training or self.pooling_size is None:
pool = pool.repeat(1, 1, x.size(2), x.size(3))
out += pool
out = self.red_bn(out)
return out
def _global_pooling(self, x):
if self.training or self.pooling_size is None:
pool = x.view(x.size(0), x.size(1), -1).mean(dim=-1)
pool = pool.view(x.size(0), x.size(1), 1, 1)
else:
pooling_size = (
min(try_index(self.pooling_size, 0), x.shape[2]),
min(try_index(self.pooling_size, 1), x.shape[3]),
)
padding = (
(pooling_size[1] - 1) // 2,
(pooling_size[1] - 1) // 2
if pooling_size[1] % 2 == 1
else (pooling_size[1] - 1) // 2 + 1,
(pooling_size[0] - 1) // 2,
(pooling_size[0] - 1) // 2
if pooling_size[0] % 2 == 1
else (pooling_size[0] - 1) // 2 + 1,
)
pool = functional.avg_pool2d(x, pooling_size, stride=1)
pool = functional.pad(pool, pad=padding, mode="replicate")
return pool
|
278d4dcc7fe977c743eadcbfa630f37fa140f758
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stubs/influxdb-client/influxdb_client/service/bucket_schemas_service.pyi
|
276cf48b5f9175196dd744848793c01f26f7147e
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 1,418
|
pyi
|
bucket_schemas_service.pyi
|
from _typeshed import Incomplete
from influxdb_client.service._base_service import _BaseService
class BucketSchemasService(_BaseService):
def __init__(self, api_client: Incomplete | None = ...) -> None: ...
def create_measurement_schema(self, bucket_id, measurement_schema_create_request, **kwargs): ...
def create_measurement_schema_with_http_info(self, bucket_id, measurement_schema_create_request, **kwargs): ...
async def create_measurement_schema_async(self, bucket_id, measurement_schema_create_request, **kwargs): ...
def get_measurement_schema(self, bucket_id, measurement_id, **kwargs): ...
def get_measurement_schema_with_http_info(self, bucket_id, measurement_id, **kwargs): ...
async def get_measurement_schema_async(self, bucket_id, measurement_id, **kwargs): ...
def get_measurement_schemas(self, bucket_id, **kwargs): ...
def get_measurement_schemas_with_http_info(self, bucket_id, **kwargs): ...
async def get_measurement_schemas_async(self, bucket_id, **kwargs): ...
def update_measurement_schema(self, bucket_id, measurement_id, measurement_schema_update_request, **kwargs): ...
def update_measurement_schema_with_http_info(
self, bucket_id, measurement_id, measurement_schema_update_request, **kwargs
): ...
async def update_measurement_schema_async(self, bucket_id, measurement_id, measurement_schema_update_request, **kwargs): ...
|
6dd5818642dd7e54310c90f44b69c299ba00df26
|
62179a165ec620ba967dbc20016e890978fbff50
|
/nncf/experimental/torch/quantization/quantize_model.py
|
c97ab9c9675b6b959331f3ddb32f0984e50fa1ac
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,779
|
py
|
quantize_model.py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple
import torch
from nncf.common.quantization.structs import QuantizationPreset
from nncf.data import Dataset
from nncf.parameters import ModelType
from nncf.parameters import TargetDevice
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
from nncf.scopes import IgnoredScope
from nncf.torch.dynamic_graph.context import no_nncf_trace
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
from nncf.torch.nested_objects_traversal import objwalk
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.utils import get_model_device
from nncf.torch.utils import is_tensor
from nncf.torch.utils import training_mode_switcher
def create_nncf_network(model: torch.nn.Module, dataset: Dataset) -> NNCFNetwork:
"""
Creates NNCFNetwork instance for the PyTorch model where the first item of dataset
is used for model tracing.
:param model: PyTorch model
:param dataset: Dataset for model tracing
:return: NNCFNetwork instance for the input model
"""
def get_inputs(dataloader_output: Any) -> Tuple[Tuple, Dict]:
if not isinstance(dataloader_output, tuple):
dataloader_output = (dataloader_output,)
return dataloader_output, {}
def wrap_inputs(args, kwargs):
return wrap_nncf_model_inputs_with_objwalk(args, kwargs)
def wrap_outputs(retval):
return wrap_nncf_model_outputs_with_objwalk(retval)
def create_dummy_forward_fn(dataset, device):
def dummy_forward(model):
with no_nncf_trace():
args = next(iter(dataset.get_inference_data()))
args, kwargs = get_inputs(args)
def send_to_device(tensor):
return tensor.to(device)
args = objwalk(args, is_tensor, send_to_device)
kwargs = objwalk(kwargs, is_tensor, send_to_device)
args, kwargs = wrap_inputs(args, kwargs)
retval = model(*args, **kwargs)
retval = replicate_same_tensors(retval)
return wrap_outputs(retval)
return dummy_forward
device = get_model_device(model)
dummy_forward_fn = create_dummy_forward_fn(dataset, device)
with training_mode_switcher(model, is_training=False):
nncf_network = NNCFNetwork(
model, dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=wrap_inputs, wrap_outputs_fn=wrap_outputs
)
nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()
return nncf_network
def quantize_impl(
model: torch.nn.Module,
calibration_dataset: Dataset,
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
fast_bias_correction: bool,
model_type: Optional[ModelType] = None,
ignored_scope: Optional[IgnoredScope] = None,
advanced_parameters: Optional[AdvancedQuantizationParameters] = None,
) -> torch.nn.Module:
"""
Experimental implementation of the `quantize()` method for the PyTorch backend.
"""
if fast_bias_correction is False:
raise ValueError(f"fast_bias_correction={fast_bias_correction} is not supported")
if target_device == TargetDevice.CPU_SPR:
raise RuntimeError("target_device == CPU_SPR is not supported")
nncf_network = create_nncf_network(model.eval(), calibration_dataset)
quantization_algorithm = PostTrainingQuantization(
preset=preset,
target_device=target_device,
subset_size=subset_size,
fast_bias_correction=fast_bias_correction,
model_type=model_type,
ignored_scope=ignored_scope,
advanced_parameters=advanced_parameters,
)
quantized_model = quantization_algorithm.apply(
nncf_network, nncf_network.nncf.get_graph(), dataset=calibration_dataset
)
quantized_model.nncf.disable_dynamic_graph_building()
return quantized_model
|
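A hedged sketch of how the `quantize_impl` entry point above is invoked; the toy model and random calibration tensors are placeholders and not part of the original file:

import torch
from nncf.data import Dataset
from nncf.common.quantization.structs import QuantizationPreset
from nncf.parameters import TargetDevice

# Toy model and calibration data; any torch.nn.Module and iterable of inputs would do.
model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
calibration_dataset = Dataset([torch.randn(1, 3, 32, 32) for _ in range(4)])

quantized = quantize_impl(
    model,
    calibration_dataset,
    preset=QuantizationPreset.PERFORMANCE,
    target_device=TargetDevice.ANY,
    subset_size=4,
    fast_bias_correction=True,  # False raises ValueError in quantize_impl
)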
4453370a626abfc69bc26a1238138f6f046a1142
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/unwrap/elIfDelete_after.py
|
3881394316b2e865bd969a76ff8903136a48889c
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 33
|
py
|
elIfDelete_after.py
|
if "1":
print 1
else:
print 3
|
e2c79bc348fe2736326a7564ea43130288a0b570
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/apps/oozie/examples/managed/shell/hello.py
|
674e44a8da6e7a8fd67fd54d7571f1cde8d43e8f
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 116
|
py
|
hello.py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
print('Hello ' + ', '.join(sys.argv[1:]))
|
8492ce71b151ab56ad944247b2e73243dbb69dd1
|
b567d43949cee77b18b3d948ba0147b681014e24
|
/core/eolearn/core/utils/__init__.py
|
083ed9fcfeacc73dee5d9e706ab09104b77408d3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sentinel-hub/eo-learn
|
092630a1dc5b78ae2569c3ec4b0729dc6702958a
|
a65899e4632b50c9c41a67e1f7698c09b929d840
|
refs/heads/master
| 2023-08-31T09:32:31.775754
| 2023-05-24T10:35:30
| 2023-05-24T10:35:30
| 135,559,956
| 1,072
| 329
|
MIT
| 2023-09-07T06:08:40
| 2018-05-31T09:08:08
|
Python
|
UTF-8
|
Python
| false
| false
| 41
|
py
|
__init__.py
|
"""
A subfolder containing utilities
"""
|
73c6827db9d988835243ba99f43585325470dfaf
|
bba97d00eba0c3de8a081e61ed6711f138d6babd
|
/pipeline/api/layers/latest_region_package.py
|
ba801c1dda585e30e322b60fd15454581412bb71
|
[
"Apache-2.0"
] |
permissive
|
keithrozario/Klayers
|
a6df271a7f72c8b2ae9d2025ff030cff09bd5b75
|
026ebed4a3de0018418638e37d6453253aa48f0e
|
refs/heads/master
| 2023-09-04T12:33:32.454360
| 2023-09-04T08:44:18
| 2023-09-04T08:44:18
| 164,266,648
| 1,725
| 276
|
NOASSERTION
| 2023-09-04T08:44:19
| 2019-01-06T01:49:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
latest_region_package.py
|
import os
import json
import boto3
from botocore.exceptions import ClientError
from aws_lambda_powertools.logging import Logger
from common.dynamodb import map_keys
logger = Logger()
@logger.inject_lambda_context
def main(event, context):
"""
Args:
        event.pathParameters.region: AWS region
        event.pathParameters.package: Python Package
        event.pathParameters.python_version: Python Version (e.g. p3.8, p3.9)
    returns:
        api_response: Dictionary containing region, package, arn and requirements.txt data
"""
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["DB_NAME"])
region = event.get("pathParameters").get("region")
package = event.get("pathParameters").get("package")
python_version = event.get("pathParameters").get("python_version", "p3.8")
pk = f"lyr#{region}:{package}:{python_version}"
sk = "lyrVrsn0#"
try:
response = table.get_item(
Key={"pk": pk, "sk": sk},
AttributesToGet=["rgn", "pckg", "arn", "rqrmntsTxt", "pckgVrsn"],
)
api_response = map_keys([response["Item"]])[0]
except ClientError as e:
logger.error(
{
"message": response["Error"]["Message"],
"pk": pk,
"sk": sk,
}
)
api_response = {}
    except KeyError as e:  # no item returned
api_response = {}
return {
"statusCode": 200,
"headers": {"Content-Type": "application/json"},
"body": json.dumps(api_response),
}
|
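A hedged sketch of the event shape the Lambda handler above expects; the concrete region and package values are illustrative only:

# The handler reads pathParameters and builds pk = "lyr#<region>:<package>:<python_version>".
event = {
    "pathParameters": {
        "region": "us-east-1",    # illustrative value
        "package": "requests",    # illustrative value
        "python_version": "p3.9",
    }
}
# main(event, context) would then query DynamoDB with
#   pk = "lyr#us-east-1:requests:p3.9", sk = "lyrVrsn0#"
# and return a JSON body containing region, package, arn and requirements.txt data.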
72f5d407e7b77412036642e58bcd00aa62478d25
|
7030c780db36c7d8efedb1152cf945a3cc248fdb
|
/python/cuml/tests/dask/test_dask_label_binarizer.py
|
9af6ff189e6d9385912d74aa2b63a38c43b4ca20
|
[
"Apache-2.0"
] |
permissive
|
rapidsai/cuml
|
546af8151fd2ee0f737cc4e62386d4b0ede74f3d
|
7d86042b8de06bc8acce632230fe5821bd36c17d
|
refs/heads/branch-23.10
| 2023-08-30T19:17:41.816373
| 2023-08-28T13:23:15
| 2023-08-28T13:23:15
| 152,616,802
| 3,615
| 569
|
Apache-2.0
| 2023-09-14T00:21:52
| 2018-10-11T15:45:35
|
C++
|
UTF-8
|
Python
| false
| false
| 2,396
|
py
|
test_dask_label_binarizer.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.dask.preprocessing import LabelBinarizer
from cuml.testing.utils import array_equal
import dask
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.parametrize(
"labels",
[
([1, 4, 5, 2, 0, 1, 6, 2, 3, 4], [4, 2, 6, 3, 2, 0, 1]),
([9, 8, 2, 1, 3, 4], [8, 2, 1, 2, 2]),
],
)
@pytest.mark.parametrize("multipart", [True, False])
def test_basic_functions(labels, multipart, client):
fit_labels, xform_labels = labels
s = cp.asarray(fit_labels, dtype=np.int32)
df = dask.array.from_array(s)
s2 = cp.asarray(xform_labels, dtype=np.int32)
df2 = dask.array.from_array(s2)
if multipart:
df = df.rechunk((1,))
df2 = df2.rechunk((1,))
binarizer = LabelBinarizer(client=client, sparse_output=False)
binarizer.fit(df)
assert array_equal(
cp.asnumpy(binarizer.classes_), np.unique(cp.asnumpy(s))
)
xformed = binarizer.transform(df2)
xformed = xformed.map_blocks(lambda x: x.get(), dtype=cp.float32)
xformed.compute_chunk_sizes()
assert xformed.compute().shape[1] == binarizer.classes_.shape[0]
original = binarizer.inverse_transform(xformed)
test = original.compute()
assert array_equal(cp.asnumpy(test), xform_labels)
@pytest.mark.parametrize(
"labels",
[
([1, 4, 5, 2, 0, 1, 6, 2, 3, 4], [4, 2, 6, 3, 2, 0, 1]),
([9, 8, 2, 1, 3, 4], [8, 2, 1, 2, 2]),
],
)
@pytest.mark.xfail(
raises=ValueError,
reason="Sparse output disabled until "
"Dask supports sparse CuPy "
"arrays",
)
def test_sparse_output_fails(labels, client):
LabelBinarizer(client=client, sparse_output=True)
|
5523463e46ace8880c17f1afd145371ab21a4a92
|
1051e6bca955732982932e5946069beb1f9cfd6d
|
/stable_baselines/common/vec_env/vec_frame_stack.py
|
044102ebdd766ead93b745a046f5d12bc875b86d
|
[
"MIT"
] |
permissive
|
Stable-Baselines-Team/stable-baselines
|
a842f6edb270aeeefa66043fe2d0b119fa73e272
|
550db0d667533b9e94672fbbe6d391227f3d08e2
|
refs/heads/master
| 2023-05-11T16:27:33.911867
| 2022-09-05T07:35:56
| 2022-09-05T07:35:56
| 166,540,770
| 296
| 69
|
MIT
| 2023-04-29T12:06:44
| 2019-01-19T11:47:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
vec_frame_stack.py
|
import warnings
import numpy as np
from gym import spaces
from stable_baselines.common.vec_env.base_vec_env import VecEnvWrapper
class VecFrameStack(VecEnvWrapper):
"""
Frame stacking wrapper for vectorized environment
:param venv: (VecEnv) the vectorized environment to wrap
:param n_stack: (int) Number of frames to stack
"""
def __init__(self, venv, n_stack):
self.venv = venv
self.n_stack = n_stack
wrapped_obs_space = venv.observation_space
low = np.repeat(wrapped_obs_space.low, self.n_stack, axis=-1)
high = np.repeat(wrapped_obs_space.high, self.n_stack, axis=-1)
self.stackedobs = np.zeros((venv.num_envs,) + low.shape, low.dtype)
observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
observations, rewards, dones, infos = self.venv.step_wait()
last_ax_size = observations.shape[-1]
self.stackedobs = np.roll(self.stackedobs, shift=-last_ax_size, axis=-1)
for i, done in enumerate(dones):
if done:
if 'terminal_observation' in infos[i]:
old_terminal = infos[i]['terminal_observation']
new_terminal = np.concatenate(
(self.stackedobs[i, ..., :-last_ax_size], old_terminal), axis=-1)
infos[i]['terminal_observation'] = new_terminal
else:
warnings.warn(
"VecFrameStack wrapping a VecEnv without terminal_observation info")
self.stackedobs[i] = 0
self.stackedobs[..., -observations.shape[-1]:] = observations
return self.stackedobs, rewards, dones, infos
def reset(self):
"""
Reset all environments
"""
obs = self.venv.reset()
self.stackedobs[...] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs
def close(self):
self.venv.close()
|
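A short usage sketch for the wrapper above, assuming a Gym CartPole environment wrapped in a DummyVecEnv (standard stable_baselines usage; not part of the original file):

import gym
from stable_baselines.common.vec_env import DummyVecEnv

venv = DummyVecEnv([lambda: gym.make("CartPole-v1")])
stacked = VecFrameStack(venv, n_stack=4)

obs = stacked.reset()                                   # last axis is obs_dim * n_stack
obs, rewards, dones, infos = stacked.step([venv.action_space.sample()])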
fdb009f5bab829b429b7c9998aa4028daa0acfdf
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/nation_change_helpers/client_nation_change_helper.py
|
1c793234512f68031a0cca34a9a0fcee840fc84e
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 914
|
py
|
client_nation_change_helper.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/nation_change_helpers/client_nation_change_helper.py
from helpers import dependency
from nation_change.nation_change_helpers import iterVehTypeCDsInNationGroup, isMainInNationGroup
from skeletons.gui.shared import IItemsCache
def getValidVehicleCDForNationChange(vehCompDescr):
tempVehCD = vehCompDescr
vehicle = _getItem(vehCompDescr)
if vehicle.hasNationGroup:
if vehicle.isInInventory:
if not vehicle.activeInNationGroup:
tempVehCD = iterVehTypeCDsInNationGroup(vehCompDescr).next()
elif not isMainInNationGroup(vehCompDescr):
tempVehCD = iterVehTypeCDsInNationGroup(vehCompDescr).next()
return tempVehCD
@dependency.replace_none_kwargs(itemsCache=IItemsCache)
def _getItem(itemID, itemsCache=None):
return itemsCache.items.getItemByCD(itemID)
|
42fbad32f888dc886a0132f057a7b6e23e4f3751
|
e78a66b3201cc5f6b9fd9a896067c616d97fd669
|
/src/toil/utils/toilDebugFile.py
|
e53543d28c12fe8bbf932863a01b713d3fc0ff3a
|
[
"Apache-2.0"
] |
permissive
|
DataBiosphere/toil
|
2ba3c223ca673818188b07feab01268b1104d253
|
87f858d693518d0f0f23cbb4f898cd14b824d843
|
refs/heads/master
| 2023-09-03T23:52:38.121916
| 2023-08-31T13:57:59
| 2023-08-31T13:57:59
| 33,148,320
| 416
| 158
|
Apache-2.0
| 2023-09-13T07:26:38
| 2015-03-30T21:08:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,468
|
py
|
toilDebugFile.py
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Debug tool for copying files contained in a toil jobStore."""
import argparse
import logging
import os.path
from typing import Optional
from toil.common import Config, Toil, parser_with_common_options
from toil.jobStores.abstractJobStore import AbstractJobStore
from toil.lib.resources import glob
from toil.statsAndLogging import set_logging_from_options
logger = logging.getLogger(__name__)
def fetchJobStoreFiles(jobStore: AbstractJobStore, options: argparse.Namespace) -> None:
"""
Takes a list of file names as glob patterns, searches for these within a
given directory, and attempts to take all of the files found and copy them
into options.localFilePath.
:param jobStore: A fileJobStore object.
:param options.fetch: List of file glob patterns to search
for in the jobStore and copy into options.localFilePath.
:param options.localFilePath: Local directory to copy files into.
:param options.jobStore: The path to the jobStore directory.
"""
for jobStoreFile in options.fetch:
jobStoreHits = glob(directoryname=options.jobStore,
glob_pattern=jobStoreFile)
for jobStoreFileID in jobStoreHits:
logger.debug(f"Copying job store file: {jobStoreFileID} to {options.localFilePath[0]}")
jobStore.read_file(jobStoreFileID,
os.path.join(options.localFilePath[0],
os.path.basename(jobStoreFileID)),
symlink=options.useSymlinks)
def printContentsOfJobStore(jobStorePath: str, nameOfJob: Optional[str] = None) -> None:
"""
Fetch a list of all files contained in the jobStore directory input if
nameOfJob is not declared, otherwise it only prints out the names of files
for that specific job for which it can find a match. Also creates a logFile
containing this same record of job files in the working directory.
:param jobStorePath: Directory path to recursively look for files.
:param nameOfJob: Default is None, which prints out all files in the jobStore.
If specified, it will print all jobStore files that have been written to the
jobStore by that job.
"""
if nameOfJob:
glob_pattern = "*" + nameOfJob + "*"
logFile = nameOfJob + "_fileset.txt"
else:
glob_pattern = "*"
logFile = "jobstore_files.txt"
nameOfJob = ""
list_of_files = glob(directoryname=jobStorePath, glob_pattern=glob_pattern)
if os.path.exists(logFile):
os.remove(logFile)
for gfile in sorted(list_of_files):
if not gfile.endswith('.new'):
logger.debug(f"{nameOfJob} File: {os.path.basename(gfile)}")
with open(logFile, "a+") as f:
f.write(os.path.basename(gfile))
f.write("\n")
def main() -> None:
parser = parser_with_common_options(jobstore_option=True)
parser.add_argument("--localFilePath",
nargs=1,
help="Location to which to copy job store files.")
parser.add_argument("--fetch",
nargs="+",
help="List of job-store files to be copied locally."
"Use either explicit names (i.e. 'data.txt'), or "
"specify glob patterns (i.e. '*.txt')")
parser.add_argument("--listFilesInJobStore",
help="Prints a list of the current files in the jobStore.")
parser.add_argument("--fetchEntireJobStore",
help="Copy all job store files into a local directory.")
parser.add_argument("--useSymlinks",
help="Creates symlink 'shortcuts' of files in the localFilePath"
" instead of hardlinking or copying, where possible. If this is"
" not possible, it will copy the files (shutil.copyfile()).")
# Load the jobStore
options = parser.parse_args()
set_logging_from_options(options)
config = Config()
config.setOptions(options)
jobStore = Toil.resumeJobStore(config.jobStore)
logger.debug("Connected to job store: %s", config.jobStore)
if options.fetch:
# Copy only the listed files locally
logger.debug("Fetching local files: %s", options.fetch)
fetchJobStoreFiles(jobStore=jobStore, options=options)
elif options.fetchEntireJobStore:
# Copy all jobStore files locally
logger.debug("Fetching all local files.")
options.fetch = "*"
fetchJobStoreFiles(jobStore=jobStore, options=options)
if options.listFilesInJobStore:
# Log filenames and create a file containing these names in cwd
printContentsOfJobStore(jobStorePath=options.jobStore)
if __name__ == "__main__":
main()
|
73187ab2043bb32d8f83d1ae4cc1aafe8ce4c5b7
|
a29afc1d7342271ecfd2f4952c859a7a6e665a7a
|
/tests/operation/test_bump_sequence.py
|
541ef5d535e996184c73e8580bacc44fd0c90d6d
|
[
"Apache-2.0"
] |
permissive
|
StellarCN/py-stellar-base
|
20252abb8ae90b20ac4d7a071046b52a8ccfb273
|
259ae05ca8155bd1e09fc5d83b8f6c6431eedf31
|
refs/heads/main
| 2023-09-02T17:18:18.158221
| 2023-07-21T02:47:10
| 2023-07-21T02:47:10
| 43,143,745
| 365
| 205
|
Apache-2.0
| 2023-09-14T02:08:18
| 2015-09-25T13:26:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
test_bump_sequence.py
|
import pytest
from stellar_sdk import BumpSequence, Operation
from . import *
class TestBumpSequence:
@pytest.mark.parametrize(
"bump_to, source, xdr",
[
pytest.param(
1234567890, None, "AAAAAAAAAAsAAAAASZYC0g==", id="without_source"
),
pytest.param(
1234567890,
kp1.public_key,
"AAAAAQAAAABiXz1Zw/ieWRoG2l4IxdbkvfDRUDq5wyKBSUnrCR5doQAAAAsAAAAASZYC0g==",
id="with_source_public_key",
),
pytest.param(
1234567890,
muxed1,
"AAAAAQAAAQAAAAAAAAAAAWJfPVnD+J5ZGgbaXgjF1uS98NFQOrnDIoFJSesJHl2hAAAACwAAAABJlgLS",
id="with_source_muxed_account",
),
pytest.param(
1234567890,
muxed1.account_muxed,
"AAAAAQAAAQAAAAAAAAAAAWJfPVnD+J5ZGgbaXgjF1uS98NFQOrnDIoFJSesJHl2hAAAACwAAAABJlgLS",
id="with_source_muxed_account_strkey",
),
pytest.param(
0,
kp1.public_key,
"AAAAAQAAAABiXz1Zw/ieWRoG2l4IxdbkvfDRUDq5wyKBSUnrCR5doQAAAAsAAAAAAAAAAA==",
id="bump_to_0",
),
],
)
def test_xdr(self, bump_to, source, xdr):
op = BumpSequence(bump_to, source)
assert op.bump_to == bump_to
check_source(op.source, source)
xdr_object = op.to_xdr_object()
assert xdr_object.to_xdr() == xdr
assert Operation.from_xdr_object(xdr_object) == op
|
31a244d660fba05787ea9bb1dfa6162226472956
|
20dda4f19ec777d1a69ae20b5e2a48b9b28bb4a4
|
/flexbe_testing/src/flexbe_testing/test_interface.py
|
78331aaa51f2a71b7cf17599bb2ebe86248c3702
|
[] |
permissive
|
team-vigir/flexbe_behavior_engine
|
fd94ac2b75bfef6ca318d700d94b76f16cfd6552
|
6028c8585d852be55f4512024dcca5caa53e57c2
|
refs/heads/main
| 2023-05-12T20:25:50.388882
| 2022-03-09T22:19:43
| 2022-03-09T22:19:43
| 38,892,260
| 131
| 72
|
BSD-3-Clause
| 2023-06-23T03:06:37
| 2015-07-10T17:06:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,835
|
py
|
test_interface.py
|
#!/usr/bin/env python
import inspect
import rospy
from flexbe_core.core import EventState
from .logger import Logger
class TestInterface(object):
""" Interface to states and behaviors that are subject to testing. """
def __init__(self, path, classname):
package = __import__(path, fromlist=[path])
clsmembers = inspect.getmembers(package, lambda member: (
inspect.isclass(member) and member.__module__ == package.__name__
))
self._class = next(c for name, c in clsmembers if name == classname)
self._instance = None
Logger.print_positive('%s imported' % self.get_base_name())
def is_state(self):
return issubclass(self._class, EventState)
def get_base_name(self):
return "state" if self.is_state() else "behavior"
# instantiate
def instantiate(self, params=None):
if self.is_state():
self._instance = self._instantiate_state(params=params)
else:
self._instance = self._instantiate_behavior(params=params)
Logger.print_positive('%s instantiated' % self.get_base_name())
def _instantiate_state(self, params=None):
if params is None:
return self._class()
else:
return self._class(**params)
def _instantiate_behavior(self, params=None):
be = self._class()
if params is not None:
for name, value in params.items():
be.set_parameter(name, value)
be.set_up(id=0, autonomy_level=255, debug=False)
return be
# execute
def execute(self, userdata, spin_cb=None):
spin_cb = spin_cb or (lambda: None)
if self.is_state():
outcome = self._execute_state(userdata, spin_cb)
else:
outcome = self._execute_behavior(userdata, spin_cb)
Logger.print_positive('finished %s execution' % self.get_base_name())
return outcome
def _execute_state(self, userdata, spin_cb):
self._instance.on_start()
outcome = None
while outcome is None and not rospy.is_shutdown():
outcome = self._instance.execute(userdata)
self._instance.sleep()
spin_cb()
self._instance.on_stop()
return outcome
def _execute_behavior(self, userdata, spin_cb):
self._instance.prepare_for_execution(userdata._data)
self._instance.confirm()
# do not execute behavior directly, instead explicitly spin its state machine
# this is required here for spinning ROS and processing roslaunch context callbacks
outcome = None
sm = self._instance._state_machine
while outcome is None and not rospy.is_shutdown():
outcome = sm.execute(userdata)
sm.sleep()
spin_cb()
return outcome
|
3237ec989abfa427232943b3c1be854b83af2452
|
88efd76316e4184d76a5e0585d95fe734233942c
|
/yellowbrick/features/__init__.py
|
7b7fc0fc212de7fee17535fa836d999c13282d90
|
[
"Apache-2.0"
] |
permissive
|
DistrictDataLabs/yellowbrick
|
1ecd9f33e58f0d007569904401c204a6cdeb5661
|
f7a8e950bd31452ea2f5d402a1c5d519cd163fd5
|
refs/heads/develop
| 2023-08-03T12:25:26.511916
| 2023-07-05T18:14:28
| 2023-07-05T18:14:28
| 59,121,694
| 4,242
| 660
|
Apache-2.0
| 2023-07-15T17:50:31
| 2016-05-18T14:12:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
__init__.py
|
# yellowbrick.features
# Visualizers for feature analysis and diagnostics.
#
# Author: Benjamin Bengfort
# Created: Mon Oct 03 21:30:18 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: __init__.py [0f4b236] benjamin@bengfort.com $
"""
Visualizers for feature analysis and diagnostics.
"""
##########################################################################
## Imports
##########################################################################
## Hoist visualizers into the features namespace
from .pcoords import ParallelCoordinates, parallel_coordinates
from .radviz import RadialVisualizer, RadViz, radviz
from .rankd import Rank1D, rank1d, Rank2D, rank2d
from .jointplot import JointPlot, JointPlotVisualizer, joint_plot
from .pca import PCA, PCADecomposition, pca_decomposition
from .manifold import Manifold, manifold_embedding
# Alias the TargetType defined in yellowbrick.utils.target
from yellowbrick.utils.target import TargetType
# RFECV and Feature Importances moved to model selection module as of YB v1.0
from yellowbrick.model_selection.rfecv import RFECV, rfecv
from yellowbrick.model_selection.importances import FeatureImportances
from yellowbrick.model_selection.importances import feature_importances
|
f6f9159beb76a235649b94f91859bb823cf90114
|
cda44e80665ce5a7d592d811d633358d48d207c5
|
/src/main.py
|
f1ed8257bf58d1bf7e320c3c297d34f6641f4869
|
[
"MIT"
] |
permissive
|
kovinevmv/getcontact
|
fe996a311f965321c09df6acd4e47acdc7ddba18
|
4461f7f6e92e4654489736b087824ce9662771da
|
refs/heads/master
| 2022-08-10T16:21:37.569765
| 2022-07-09T16:38:14
| 2022-07-09T16:38:14
| 232,601,679
| 347
| 80
|
MIT
| 2022-03-12T16:53:43
| 2020-01-08T16:03:27
|
Python
|
UTF-8
|
Python
| false
| false
| 900
|
py
|
main.py
|
import argparse
from getcontact.getcontact import GetContactAPI
from getcontact.config import config
description = "Get information about phone number from databases of GetContact"
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-p",
"--phone",
help="Phone number (example: +78005553535)")
parser.add_argument("-j",
"--json",
help="Print output in JSON format",
action="store_true")
parser.add_argument("-v", "--verbose", help="Log", action="store_true")
args = parser.parse_args()
phone = args.phone
getcontact = GetContactAPI()
config.VERBOSE = args.verbose
if args.json:
print(getcontact.get_information_by_phone(phone))
else:
getcontact.print_information_by_phone(phone)
|
d0455acadd0fc2c360e6363bff1ef525831f1e3a
|
bc028a1cbc3cfab47b1d5a2df31bee8946881fb4
|
/Firewall/BUZZDIRECTION/BUZZ_1210/SeconddateCnC/noarch/create_dns_injection.py
|
a0c0aefdaf1eccec2a9b5ae2be26f3a9b9637bd6
|
[] |
no_license
|
nneonneo/eqgrp-free-file
|
58a427b0a9716e7a24de878b1b7a8f06b3d92212
|
49e457d7ac870d5e00f5a247b94476cd5643f3ba
|
refs/heads/master
| 2021-01-19T01:24:37.276779
| 2016-08-16T22:50:57
| 2016-08-16T22:56:14
| 65,761,779
| 211
| 195
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,060
|
py
|
create_dns_injection.py
|
#!/usr/bin/env python
from optparse import OptionParser, Option, OptionValueError
import copy
import re
import socket
import struct
import sys
from optparse import IndentedHelpFormatter
import textwrap
Flags = {
'AA' : 10,
'TC' : 9,
'RA' : 7,
}
Qtypes = {
'A' : 1,
'NS' : 2,
'CNAME' : 5,
'PTR' : 12,
}
class IndentedHelpFormatterWithNL(IndentedHelpFormatter):
def format_description(self, description):
if not description:
return ''
desc_width = self.width - self.current_indent
indent = ' ' * self.current_indent
bits = description.split('\n')
formatted_bits = [
textwrap.fill(
bit,
desc_width,
initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
return '\n'.join(formatted_bits) + '\n'
class DNSRecord:
def __init__(self, name, type, ttl, data):
if len(name) == 0 or len(name) > 253:
raise ValueError
if type == 'PTR' and not self.is_valid_ip(name) or \
type != 'PTR' and not self.is_valid_name(name):
raise ValueError
type = type.upper()
if not type in Qtypes.keys():
raise ValueError
ttl = int(ttl)
if 0 > ttl:
raise ValueError
if len(data) == 0 or len(data) > 253:
raise ValueError
if type == 'A' and not self.is_valid_ip(data) or \
type != 'A' and not self.is_valid_name(data):
raise ValueError
self.name = name
self.type = type
self.ttl = ttl
self.data = data
def is_valid_name(self, name):
result = True
for s in name.split('.'):
if len(s) > 63:
result = False
break
return result
def is_valid_ip(self, addr):
result = True
if len(addr.split('.')) != 4:
result = False
for s in addr.split('.'):
if len(s) > 3:
result = False
break
try:
socket.inet_aton(addr)
except socket.error:
result = False
return result
def check_DNSRecord(option, opt, value):
try:
(name, type, ttl, data) = value.split(',')
return DNSRecord(name, type, ttl, data)
except ValueError:
raise OptionValueError(
'%s %r invalid; expected: Name,Type,TTL,Data' % (opt, value))
class ExtendedOption(Option):
TYPES = Option.TYPES + ('DNSRecord',)
TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER)
TYPE_CHECKER['DNSRecord'] = check_DNSRecord
def parse_arguments(parser):
parser.set_defaults(flags=[])
parser.set_defaults(question='')
parser.set_defaults(compression=False)
parser.set_defaults(authorities=[])
parser.set_defaults(additionals=[])
parser.add_option(
'-o', '--outfile',
action='store', type='string', dest='outfile',
help='Output file name (optional). By default the resulting data is written to stdout.')
parser.add_option(
'-f', '--flag',
action='append', type='choice', choices=Flags.keys(), dest='flags',
help='Header flags to set: %s (optional).' % Flags.keys())
parser.add_option(
'-a', '--answer',
action='append', type='DNSRecord', dest='answers',
help='DNS answer section resource record (at least one required). Format: Name,Type,TTL,Data')
parser.add_option(
'-u', '--authority',
action='append', type='DNSRecord', dest='authorities',
help='DNS authority section resource record (optional). Format: Name,Type,TTL,Data')
parser.add_option(
'-d', '--additional',
action='append', type='DNSRecord', dest='additionals',
help='DNS additional section resource record (optional). Format: Name,Type,TTL,Data')
parser.add_option('-q', '--question',
action='store', type='string', dest='question',
help='Name field from the DNS question section (required when compression is enabled).')
parser.add_option(
'-c', '--compress',
action='store_true', dest='compression',
help='Compress the given names and data using standard DNS compression.')
return parser.parse_args()
class DNSCompressionData:
def __init__(self, offset, question):
self.data = [(offset, question)]
def add(self, offset, name):
self.data.append(
(offset + self.data[0][0]+len(self.data[0][1]) + 5, name))
def find(self, name):
for d in self.data:
if d[1].endswith(name):
return d[0] + (len(d[1]) - len(name))
return -1
class DNSInjection:
def __init__(self, flags, question='', compression=False):
self.sections = ''
self.flags = flags
self.acount = 0
self.ucount = 0
self.dcount = 0
self.compression = compression
self.compressor = DNSCompressionData(12, '.' + question)
def add_section(self, section, record):
if section == 'a':
self.acount += 1
elif section == 'u':
self.ucount += 1
elif section == 'd':
self.dcount += 1
else:
return
if record.type == 'PTR':
fixed = ''
for s in reversed(record.name.split('.')):
fixed += s + '.'
record.name = fixed + 'in-addr.arpa'
if self.compression == True and not 'OFFSET' in record.name:
record.name = self.compress(record.name)
name = self.encode_name(record.name)
type = Qtypes[record.type]
ttl = record.ttl
self.sections += struct.pack('>%dsHHL' % (len(name)),
name, type, 1, ttl)
if record.type == 'A':
data = self.encode_address(record.data)
else:
if self.compression == True and not 'OFFSET' in record.data:
record.data = self.compress(record.data, is_data=True)
data = self.encode_name(record.data)
self.sections += struct.pack('>H%ds' % (len(data)),
len(data), data)
def encode_address(self, address):
packed = ''
for section in address.split('.'):
packed += struct.pack('>B',
int(section))
return packed
def encode_name(self, name):
packed = ''
for section in name.split('.'):
pointer = re.match('^(.*)OFFSET:(\d+)$', section)
if pointer:
if len(pointer.groups()[0]) != 0:
packed += struct.pack('>B%ds' % len(pointer.groups()[0]),
len(pointer.groups()[0]), pointer.groups()[0])
return struct.pack('>%dsH' % len(packed),
packed, 49152 + int(pointer.groups()[1]))
packed += struct.pack('>B%ds' % len(section),
len(section), section)
return struct.pack('>%dsB' % len(packed),
packed, 0)
def compress(self, name, is_data=False):
s = '.' + name
t = ''
compressed = name
while True:
offset = self.compressor.find(s)
if offset != -1:
compressed = t + 'OFFSET:' + str(offset)
break
i = s.find('.', 1)
if i == -1:
break
t = t + s[:i]
s = s[i:]
if is_data == False:
self.compressor.add(len(self.sections), '.' + name)
else:
self.compressor.add(len(self.sections) + 2, '.' + name)
if compressed.startswith('.'):
compressed = compressed[1:]
return compressed
def add_answer(self, record):
self.add_section('a', record)
def add_authority(self, record):
self.add_section('u', record)
def add_additional(self, record):
self.add_section('d', record)
def finish(self):
bits = 0
for flag in self.flags:
bits |= 1 << Flags[flag]
header = struct.pack('>HHHHHH',
0, bits, 0, self.acount, self.ucount, self.dcount)
return header + self.sections
def main():
description = """
Generates DNS injections in the format required by SECONDDATE. Multiple
answer, authority, and additional resource records can be specified and
will be used in the given order.
Each resource record should take the form:
Name,Type,TTL,Data
Name: A hostname: 'host.network.com', a decimal numeric offset within
the final packet 'OFFSET:12' (a pointer to the beginning of the
query name), or a combination: 'otherOFFSET:17' (a pointer to
.network.com if the query name is [any 4 characters].network.com.
If the record type is 'PTR' then only the IP address is
necessary. Given this record type, names of the form 'w.x.y.z'
will be automatically translated to 'z.y.x.w.in-addr.arpa'.
Type: Abbreviation of the DNS record type. Supported types
A, NS, CNAME, and PTR
TTL: The record's time to live in decimal seconds. This is the time
that the record should remain valid if cached.
Data: A domain name or an IP address, chosen appropriately for the
given record type. Domain name types: NS, CNAME, PTR; IP
address types: A.
The flags option can be specified multiple times and allows setting
server controlled flag values:
        AA: The answer is authoritative.
TC: The answer has been truncated.
RA: Recursion is available.
Although the question portion of the DNS packet is not necessary, if it is
known during rule creation the strings in the packet can be optimized by
    using DNS compression. The question (or at an absolute minimum an arbitrary
string of the same length as the real question) must be provided in order to
enable the compression.
Examples
Simple, single answer of 192.168.1.1 to any DNS query that should
not be cached due to a 0 second TTL:
./create_dns_injection.py -a OFFSET:12,A,0,192.168.1.1
Complex, multiple answer reply using CNAMEs with a 30 minute TTL:
./create_dns_injection.py -a OFFSET:12,CNAME,1800,www.badguy.net \\
-a www.badguy.net,CNAME,1800,host.badguy.net \\
-a host.badguy.net,A,1800,192.168.1.1
Similar to the previous example, but adds authority and additional
    sections, sets the question to the initial query, and enables compression:
./create_dns_injection.py -a update.domain.com,CNAME,1800,www.badguy.net \\
-a www.badguy.net,CNAME,1800,host.badguy.net \\
-a host.badguy.net,A,1800,192.168.1.1 \\
-u badguy.net,NS,1800,ns.badguy.net \\
-d ns.badguy.net,A,1800,192.168.1.1 \\
-q update.domain.com \\
-c
"""
parser = OptionParser(
option_class=ExtendedOption,
usage="%prog -a | --answer name,type,ttl,data [ ... options ... ]",
version="%prog 1.1",
description=description,
formatter=IndentedHelpFormatterWithNL())
(options, args) = parse_arguments(parser)
# need at least one answer
if not options.answers:
parser.error("at least one -a or --answer required")
# need to know the length of the question to compress
if True == options.compression and '' == options.question:
parser.error("-q or --question required for compression")
injection = DNSInjection(
options.flags,
question=options.question,
compression=options.compression)
for r in options.answers:
injection.add_answer(r)
for r in options.authorities:
injection.add_authority(r)
for r in options.additionals:
injection.add_additional(r)
output = injection.finish()
if options.outfile:
file = open(options.outfile, 'wb')
file.write(output)
file.close()
else:
sys.stdout.write(output)
if __name__ == '__main__':
main()
|
a6fd960be6f96324f1d65ce38d5888ea693d2760
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_parser/pyunit_hexdev_497_import_gzip_airlines.py
|
6eb62836dfd2bea3637bc2a3ffa6ed9516e66ef7
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
pyunit_hexdev_497_import_gzip_airlines.py
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
# This test is to make sure that we have fixed the following issue properly using airlines data:
# https://github.com/h2oai/private-h2o-3/issues/341: Merged Gzip Files not read properly.
# I will import the original files and then the zip directory and compare them to see if they are the same.
def import_folder():
tol_time = 200 # comparing in ms or ns for timestamp columns
tol_numeric = 1e-5 # tolerance for comparing other numeric fields
numElements2Compare = 0 # choose number of elements per column to compare. Save test time.
multi_file_gzip_comp = h2o.import_file(path=pyunit_utils.locate("smalldata/parser/hexdev_497/airlines_small_csv.zip"))
multi_file_csv = \
h2o.import_file(path=pyunit_utils.locate("smalldata/parser/hexdev_497/airlines_small_csv/all_airlines.csv"))
# make sure H2O frames built from a zip file of a directory and the original files are the same.
assert pyunit_utils.compare_frames(multi_file_csv, multi_file_gzip_comp, numElements2Compare, tol_time, tol_numeric,
True), "H2O frame parsed from zip directory and unzipped directory are different!"
if __name__ == "__main__":
pyunit_utils.standalone_test(import_folder)
else:
import_folder()
|
e365db06ed4b6063781c738b48fec822c7b548f0
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowAaaMemory/cli/equal/golden_output_expected.py
|
b7b535af10f9e0e6d683cdbda30a1056afc6147d
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
golden_output_expected.py
|
expected_output = {
'aaa_memory': {
'AAA Acct AVLnod': {
'alloc_name': 'AAA Acct AVLnod',
'allocated': 4556,
'chunk': 'Chunk',
'count': 3,
'in_use': 84,
'percentage': 1,
},
'AAA Acct DB chu': {
'alloc_name': 'AAA Acct DB chu',
'allocated': 31412,
'chunk': 'Chunk',
'count': 3,
'in_use': 564,
'percentage': 1,
},
'AAA Acct Rec ch': {
'alloc_name': 'AAA Acct Rec ch',
'allocated': 10248,
'chunk': 'Chunk',
'count': 3,
'in_use': 252,
'percentage': 2,
},
'AAA AttrL Hdr': {
'alloc_name': 'AAA AttrL Hdr',
'allocated': 65756,
'chunk': 'Chunk',
'count': 23,
'in_use': 736,
'percentage': 1,
},
'AAA AttrL Sub': {
'alloc_name': 'AAA AttrL Sub',
'allocated': 65756,
'chunk': 'Chunk',
'count': 23,
'in_use': 6256,
'percentage': 9,
},
'AAA DB Elt Chun': {
'alloc_name': 'AAA DB Elt Chun',
'allocated': 65756,
'chunk': 'Chunk',
'count': 3,
'in_use': 228,
'percentage': 0,
},
'AAA General DB': {
'alloc_name': 'AAA General DB',
'allocated': 2256,
'chunk': '',
'count': 3,
'in_use': 2100,
'percentage': 93,
},
'AAA Huge Chunk': {
'alloc_name': 'AAA Huge Chunk',
'allocated': 65756,
'chunk': 'Chunk',
'count': 0,
'in_use': 0,
'percentage': 0,
},
'AAA Interface Struct': {
'alloc_name': 'AAA Interface Struct',
'allocated': 1116,
'chunk': '',
'count': 3,
'in_use': 960,
'percentage': 86,
},
'AAA Large Chunk': {
'alloc_name': 'AAA Large Chunk',
'allocated': 65756,
'chunk': 'Chunk',
'count': 0,
'in_use': 0,
'percentage': 0,
},
'AAA String Malloc': {
'alloc_name': 'AAA String Malloc',
'allocated': 240,
'chunk': '',
'count': 1,
'in_use': 188,
'percentage': 78,
},
'AAA Unique Id Hash Table': {
'alloc_name': 'AAA Unique Id Hash Table',
'allocated': 4148,
'chunk': '',
'count': 1,
'in_use': 4096,
'percentage': 98,
},
'AAA chunk': {
'alloc_name': 'AAA chunk',
'allocated': 17800,
'chunk': 'Chunk',
'count': 0,
'in_use': 0,
'percentage': 0,
},
},
'low_memory': {
'acc_threshold': 2,
'auth_threshold': 3,
'coa_pkt_drop': 0,
'local_server_pkt_drop': 0,
'pod_pkt_drop': 0,
'unique_id_failure': 0,
},
'total_allocated': {
'total_bytes': 418356,
'total_kb': 408,
'total_mb': 0.398,
},
}
|
f5fcfd0bb577852f0254579d43b1927cd6de0b6a
|
7343ece3b82ac87a594865c4074623b45b0297b4
|
/tests/handlers/test_room_summary.py
|
d907fcaf04bd9f9f73cdd49077d467f1c925a6cc
|
[
"Apache-2.0"
] |
permissive
|
matrix-org/synapse
|
a00111f83310783b78e2996557f8bbae4d9fb229
|
d35bed8369514fe727b4fe1afb68f48cc8b2655a
|
refs/heads/develop
| 2023-09-05T05:24:20.808942
| 2023-09-04T16:14:09
| 2023-09-04T16:14:09
| 22,844,864
| 12,215
| 2,869
|
Apache-2.0
| 2023-09-14T15:20:48
| 2014-08-11T15:51:42
|
Python
|
UTF-8
|
Python
| false
| false
| 41,588
|
py
|
test_room_summary.py
|
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
from unittest import mock
from twisted.internet.defer import ensureDeferred
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import (
EventContentFields,
EventTypes,
HistoryVisibility,
JoinRules,
Membership,
RestrictedJoinRuleTypes,
RoomTypes,
)
from synapse.api.errors import AuthError, NotFoundError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict
from synapse.federation.transport.client import TransportLayerClient
from synapse.handlers.room_summary import _child_events_comparison_key, _RoomEntry
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.types import JsonDict, UserID, create_requester
from synapse.util import Clock
from tests import unittest
def _create_event(
room_id: str, order: Optional[Any] = None, origin_server_ts: int = 0
) -> mock.Mock:
result = mock.Mock(name=room_id)
result.room_id = room_id
result.content = {}
result.origin_server_ts = origin_server_ts
if order is not None:
result.content["order"] = order
return result
def _order(*events: mock.Mock) -> List[mock.Mock]:
return sorted(events, key=_child_events_comparison_key)
class TestSpaceSummarySort(unittest.TestCase):
def test_no_order_last(self) -> None:
"""An event with no ordering is placed behind those with an ordering."""
ev1 = _create_event("!abc:test")
ev2 = _create_event("!xyz:test", "xyz")
self.assertEqual([ev2, ev1], _order(ev1, ev2))
def test_order(self) -> None:
"""The ordering should be used."""
ev1 = _create_event("!abc:test", "xyz")
ev2 = _create_event("!xyz:test", "abc")
self.assertEqual([ev2, ev1], _order(ev1, ev2))
def test_order_origin_server_ts(self) -> None:
"""Origin server is a tie-breaker for ordering."""
ev1 = _create_event("!abc:test", origin_server_ts=10)
ev2 = _create_event("!xyz:test", origin_server_ts=30)
self.assertEqual([ev1, ev2], _order(ev1, ev2))
def test_order_room_id(self) -> None:
"""Room ID is a final tie-breaker for ordering."""
ev1 = _create_event("!abc:test")
ev2 = _create_event("!xyz:test")
self.assertEqual([ev1, ev2], _order(ev1, ev2))
def test_invalid_ordering_type(self) -> None:
"""Invalid orderings are considered the same as missing."""
ev1 = _create_event("!abc:test", 1)
ev2 = _create_event("!xyz:test", "xyz")
self.assertEqual([ev2, ev1], _order(ev1, ev2))
ev1 = _create_event("!abc:test", {})
self.assertEqual([ev2, ev1], _order(ev1, ev2))
ev1 = _create_event("!abc:test", [])
self.assertEqual([ev2, ev1], _order(ev1, ev2))
ev1 = _create_event("!abc:test", True)
self.assertEqual([ev2, ev1], _order(ev1, ev2))
def test_invalid_ordering_value(self) -> None:
"""Invalid orderings are considered the same as missing."""
ev1 = _create_event("!abc:test", "foo\n")
ev2 = _create_event("!xyz:test", "xyz")
self.assertEqual([ev2, ev1], _order(ev1, ev2))
ev1 = _create_event("!abc:test", "a" * 51)
self.assertEqual([ev2, ev1], _order(ev1, ev2))
class SpaceSummaryTestCase(unittest.HomeserverTestCase):
servlets = [
admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.hs = hs
self.handler = self.hs.get_room_summary_handler()
# Create a user.
self.user = self.register_user("user", "pass")
self.token = self.login("user", "pass")
# Create a space and a child room.
self.space = self.helper.create_room_as(
self.user,
tok=self.token,
extra_content={
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
},
)
self.room = self.helper.create_room_as(self.user, tok=self.token)
self._add_child(self.space, self.room, self.token)
def _add_child(
self,
space_id: str,
room_id: str,
token: str,
order: Optional[str] = None,
via: Optional[List[str]] = None,
) -> None:
"""Add a child room to a space."""
if via is None:
via = [self.hs.hostname]
content: JsonDict = {"via": via}
if order is not None:
content["order"] = order
self.helper.send_state(
space_id,
event_type=EventTypes.SpaceChild,
body=content,
tok=token,
state_key=room_id,
)
def _assert_hierarchy(
self, result: JsonDict, rooms_and_children: Iterable[Tuple[str, Iterable[str]]]
) -> None:
"""
Assert that the expected room IDs are in the response.
Args:
result: The result from the API call.
rooms_and_children: An iterable of tuples where each tuple is:
The expected room ID.
The expected IDs of any children rooms.
"""
result_room_ids = []
result_children_ids = []
for result_room in result["rooms"]:
# Ensure federation results are not leaking over the client-server API.
self.assertNotIn("allowed_room_ids", result_room)
result_room_ids.append(result_room["room_id"])
result_children_ids.append(
[
(result_room["room_id"], cs["state_key"])
for cs in result_room["children_state"]
]
)
room_ids = []
children_ids = []
for room_id, children in rooms_and_children:
room_ids.append(room_id)
children_ids.append([(room_id, child_id) for child_id in children])
# Note that order matters.
self.assertEqual(result_room_ids, room_ids)
self.assertEqual(result_children_ids, children_ids)
def _poke_fed_invite(self, room_id: str, from_user: str) -> None:
"""
        Creates an invite (as if received over federation) for the room from the
        given user.
        Args:
            room_id: The room ID to issue an invite for.
            from_user: The user to invite from.
"""
# Poke an invite over federation into the database.
fed_handler = self.hs.get_federation_handler()
fed_hostname = UserID.from_string(from_user).domain
event = make_event_from_dict(
{
"room_id": room_id,
"event_id": "!abcd:" + fed_hostname,
"type": EventTypes.Member,
"sender": from_user,
"state_key": self.user,
"content": {"membership": Membership.INVITE},
"prev_events": [],
"auth_events": [],
"depth": 1,
"origin_server_ts": 1234,
}
)
self.get_success(
fed_handler.on_invite_request(fed_hostname, event, RoomVersions.V6)
)
def test_simple_space(self) -> None:
"""Test a simple space with a single room."""
# The result should have the space and the room in it, along with a link
# from space -> room.
expected = [(self.space, [self.room]), (self.room, ())]
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self._assert_hierarchy(result, expected)
def test_large_space(self) -> None:
"""Test a space with a large number of rooms."""
rooms = [self.room]
# Make at least 51 rooms that are part of the space.
for _ in range(55):
room = self.helper.create_room_as(self.user, tok=self.token)
self._add_child(self.space, room, self.token)
rooms.append(room)
# The result should have the space and the rooms in it, along with the links
# from space -> room.
expected = [(self.space, rooms)] + [(room, []) for room in rooms]
# Make two requests to fully paginate the results.
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
result2 = self.get_success(
self.handler.get_room_hierarchy(
create_requester(self.user), self.space, from_token=result["next_batch"]
)
)
# Combine the results.
result["rooms"] += result2["rooms"]
self._assert_hierarchy(result, expected)
def test_visibility(self) -> None:
"""A user not in a space cannot inspect it."""
user2 = self.register_user("user2", "pass")
token2 = self.login("user2", "pass")
# The user can see the space since it is publicly joinable.
expected = [(self.space, [self.room]), (self.room, ())]
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(user2), self.space)
)
self._assert_hierarchy(result, expected)
# If the space is made invite-only, it should no longer be viewable.
self.helper.send_state(
self.space,
event_type=EventTypes.JoinRules,
body={"join_rule": JoinRules.INVITE},
tok=self.token,
)
self.get_failure(
self.handler.get_room_hierarchy(create_requester(user2), self.space),
AuthError,
)
# If the space is made world-readable it should return a result.
self.helper.send_state(
self.space,
event_type=EventTypes.RoomHistoryVisibility,
body={"history_visibility": HistoryVisibility.WORLD_READABLE},
tok=self.token,
)
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(user2), self.space)
)
self._assert_hierarchy(result, expected)
# Make it not world-readable again and confirm it results in an error.
self.helper.send_state(
self.space,
event_type=EventTypes.RoomHistoryVisibility,
body={"history_visibility": HistoryVisibility.JOINED},
tok=self.token,
)
self.get_failure(
self.handler.get_room_hierarchy(create_requester(user2), self.space),
AuthError,
)
# Join the space and results should be returned.
self.helper.invite(self.space, targ=user2, tok=self.token)
self.helper.join(self.space, user2, tok=token2)
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(user2), self.space)
)
self._assert_hierarchy(result, expected)
# Attempting to view an unknown room returns the same error.
self.get_failure(
self.handler.get_room_hierarchy(
create_requester(user2), "#not-a-space:" + self.hs.hostname
),
AuthError,
)
def test_room_hierarchy_cache(self) -> None:
"""In-flight room hierarchy requests are deduplicated."""
# Run two `get_room_hierarchy` calls up until they block.
deferred1 = ensureDeferred(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
deferred2 = ensureDeferred(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
# Complete the two calls.
result1 = self.get_success(deferred1)
result2 = self.get_success(deferred2)
# Both `get_room_hierarchy` calls should return the same result.
expected = [(self.space, [self.room]), (self.room, ())]
self._assert_hierarchy(result1, expected)
self._assert_hierarchy(result2, expected)
self.assertIs(result1, result2)
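        # Identity (not just equality) shows the second call reused the in-flight result.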
# A subsequent `get_room_hierarchy` call should not reuse the result.
result3 = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self._assert_hierarchy(result3, expected)
self.assertIsNot(result1, result3)
def test_room_hierarchy_cache_sharing(self) -> None:
"""Room hierarchy responses for different users are not shared."""
user2 = self.register_user("user2", "pass")
# Make the room within the space invite-only.
self.helper.send_state(
self.room,
event_type=EventTypes.JoinRules,
body={"join_rule": JoinRules.INVITE},
tok=self.token,
)
# Run two `get_room_hierarchy` calls for different users up until they block.
deferred1 = ensureDeferred(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
deferred2 = ensureDeferred(
self.handler.get_room_hierarchy(create_requester(user2), self.space)
)
# Complete the two calls.
result1 = self.get_success(deferred1)
result2 = self.get_success(deferred2)
# The `get_room_hierarchy` calls should return different results.
self._assert_hierarchy(result1, [(self.space, [self.room]), (self.room, ())])
self._assert_hierarchy(result2, [(self.space, [self.room])])
def _create_room_with_join_rule(
self, join_rule: str, room_version: Optional[str] = None, **extra_content: Any
) -> str:
"""Create a room with the given join rule and add it to the space."""
room_id = self.helper.create_room_as(
self.user,
room_version=room_version,
tok=self.token,
extra_content={
"initial_state": [
{
"type": EventTypes.JoinRules,
"state_key": "",
"content": {
"join_rule": join_rule,
**extra_content,
},
}
]
},
)
self._add_child(self.space, room_id, self.token)
return room_id
def test_filtering(self) -> None:
"""
Rooms should be properly filtered to only include rooms the user has access to.
"""
user2 = self.register_user("user2", "pass")
token2 = self.login("user2", "pass")
# Create a few rooms which will have different properties.
public_room = self._create_room_with_join_rule(JoinRules.PUBLIC)
knock_room = self._create_room_with_join_rule(
JoinRules.KNOCK, room_version=RoomVersions.V7.identifier
)
not_invited_room = self._create_room_with_join_rule(JoinRules.INVITE)
invited_room = self._create_room_with_join_rule(JoinRules.INVITE)
self.helper.invite(invited_room, targ=user2, tok=self.token)
restricted_room = self._create_room_with_join_rule(
JoinRules.RESTRICTED,
room_version=RoomVersions.V8.identifier,
allow=[],
)
restricted_accessible_room = self._create_room_with_join_rule(
JoinRules.RESTRICTED,
room_version=RoomVersions.V8.identifier,
allow=[
{
"type": RestrictedJoinRuleTypes.ROOM_MEMBERSHIP,
"room_id": self.space,
"via": [self.hs.hostname],
}
],
)
world_readable_room = self._create_room_with_join_rule(JoinRules.INVITE)
self.helper.send_state(
world_readable_room,
event_type=EventTypes.RoomHistoryVisibility,
body={"history_visibility": HistoryVisibility.WORLD_READABLE},
tok=self.token,
)
joined_room = self._create_room_with_join_rule(JoinRules.INVITE)
self.helper.invite(joined_room, targ=user2, tok=self.token)
self.helper.join(joined_room, user2, tok=token2)
# Join the space.
self.helper.join(self.space, user2, tok=token2)
expected = [
(
self.space,
[
self.room,
public_room,
knock_room,
not_invited_room,
invited_room,
restricted_room,
restricted_accessible_room,
world_readable_room,
joined_room,
],
),
(self.room, ()),
(public_room, ()),
(knock_room, ()),
(invited_room, ()),
(restricted_accessible_room, ()),
(world_readable_room, ()),
(joined_room, ()),
]
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(user2), self.space)
)
self._assert_hierarchy(result, expected)
def test_complex_space(self) -> None:
"""
Create a "complex" space to see how it handles things like loops and subspaces.
"""
# Create an inaccessible room.
user2 = self.register_user("user2", "pass")
token2 = self.login("user2", "pass")
room2 = self.helper.create_room_as(user2, is_public=False, tok=token2)
# This is a bit odd as "user" is adding a room they don't know about, but
# it works for the tests.
self._add_child(self.space, room2, self.token)
# Create a subspace under the space with an additional room in it.
subspace = self.helper.create_room_as(
self.user,
tok=self.token,
extra_content={
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
},
)
subroom = self.helper.create_room_as(self.user, tok=self.token)
self._add_child(self.space, subspace, token=self.token)
self._add_child(subspace, subroom, token=self.token)
# Also add the two rooms from the space into this subspace (causing loops).
self._add_child(subspace, self.room, token=self.token)
self._add_child(subspace, room2, self.token)
# The result should include each room a single time and each link.
expected = [
(self.space, [self.room, room2, subspace]),
(self.room, ()),
(subspace, [subroom, self.room, room2]),
(subroom, ()),
]
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self._assert_hierarchy(result, expected)
def test_pagination(self) -> None:
"""Test simple pagination works."""
room_ids = []
for i in range(1, 10):
room = self.helper.create_room_as(self.user, tok=self.token)
self._add_child(self.space, room, self.token, order=str(i))
room_ids.append(room)
# The room created initially doesn't have an order, so comes last.
room_ids.append(self.room)
result = self.get_success(
self.handler.get_room_hierarchy(
create_requester(self.user), self.space, limit=7
)
)
# The result should have the space and all of the links, plus some of the
# rooms and a pagination token.
expected: List[Tuple[str, Iterable[str]]] = [(self.space, room_ids)]
expected += [(room_id, ()) for room_id in room_ids[:6]]
self._assert_hierarchy(result, expected)
self.assertIn("next_batch", result)
# Check the next page.
result = self.get_success(
self.handler.get_room_hierarchy(
create_requester(self.user),
self.space,
limit=5,
from_token=result["next_batch"],
)
)
# The result should have the space and the room in it, along with a link
# from space -> room.
expected = [(room_id, ()) for room_id in room_ids[6:]]
self._assert_hierarchy(result, expected)
self.assertNotIn("next_batch", result)
def test_invalid_pagination_token(self) -> None:
"""An invalid pagination token, or changing other parameters, shoudl be rejected."""
room_ids = []
for i in range(1, 10):
room = self.helper.create_room_as(self.user, tok=self.token)
self._add_child(self.space, room, self.token, order=str(i))
room_ids.append(room)
# The room created initially doesn't have an order, so comes last.
room_ids.append(self.room)
result = self.get_success(
self.handler.get_room_hierarchy(
create_requester(self.user), self.space, limit=7
)
)
self.assertIn("next_batch", result)
# Changing the room ID, suggested-only, or max-depth causes an error.
self.get_failure(
self.handler.get_room_hierarchy(
create_requester(self.user), self.room, from_token=result["next_batch"]
),
SynapseError,
)
self.get_failure(
self.handler.get_room_hierarchy(
create_requester(self.user),
self.space,
suggested_only=True,
from_token=result["next_batch"],
),
SynapseError,
)
self.get_failure(
self.handler.get_room_hierarchy(
create_requester(self.user),
self.space,
max_depth=0,
from_token=result["next_batch"],
),
SynapseError,
)
        # An invalid token results in an error.
self.get_failure(
self.handler.get_room_hierarchy(
create_requester(self.user), self.space, from_token="foo"
),
SynapseError,
)
def test_max_depth(self) -> None:
"""Create a deep tree to test the max depth against."""
spaces = [self.space]
rooms = [self.room]
for _ in range(5):
spaces.append(
self.helper.create_room_as(
self.user,
tok=self.token,
extra_content={
"creation_content": {
EventContentFields.ROOM_TYPE: RoomTypes.SPACE
}
},
)
)
self._add_child(spaces[-2], spaces[-1], self.token)
rooms.append(self.helper.create_room_as(self.user, tok=self.token))
self._add_child(spaces[-1], rooms[-1], self.token)
# Test just the space itself.
result = self.get_success(
self.handler.get_room_hierarchy(
create_requester(self.user), self.space, max_depth=0
)
)
expected: List[Tuple[str, Iterable[str]]] = [(spaces[0], [rooms[0], spaces[1]])]
self._assert_hierarchy(result, expected)
# A single additional layer.
result = self.get_success(
self.handler.get_room_hierarchy(
create_requester(self.user), self.space, max_depth=1
)
)
expected += [
(rooms[0], ()),
(spaces[1], [rooms[1], spaces[2]]),
]
self._assert_hierarchy(result, expected)
# A few layers.
result = self.get_success(
self.handler.get_room_hierarchy(
create_requester(self.user), self.space, max_depth=3
)
)
expected += [
(rooms[1], ()),
(spaces[2], [rooms[2], spaces[3]]),
(rooms[2], ()),
(spaces[3], [rooms[3], spaces[4]]),
]
self._assert_hierarchy(result, expected)
def test_unknown_room_version(self) -> None:
"""
        If a room with an unknown room version is encountered it should not cause
        the entire summary to fail; the unknown room is simply omitted.
"""
# Poke the database and update the room version to an unknown one.
self.get_success(
self.hs.get_datastores().main.db_pool.simple_update(
"rooms",
keyvalues={"room_id": self.room},
updatevalues={"room_version": "unknown-room-version"},
desc="updated-room-version",
)
)
# Invalidate method so that it returns the currently updated version
# instead of the cached version.
self.hs.get_datastores().main.get_room_version_id.invalidate((self.room,))
# The result should have only the space, along with a link from space -> room.
expected = [(self.space, [self.room])]
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self._assert_hierarchy(result, expected)
def test_fed_complex(self) -> None:
"""
Return data over federation and ensure that it is handled properly.
"""
fed_hostname = self.hs.hostname + "2"
subspace = "#subspace:" + fed_hostname
subroom = "#subroom:" + fed_hostname
# Generate some good data, and some bad data:
#
# * Event *back* to the root room.
# * Unrelated events / rooms
# * Multiple levels of events (in a not-useful order, e.g. grandchild
# events before child events).
# Note that these entries are brief, but should contain enough info.
requested_room_entry = _RoomEntry(
subspace,
{
"room_id": subspace,
"world_readable": True,
"room_type": RoomTypes.SPACE,
},
[
{
"type": EventTypes.SpaceChild,
"room_id": subspace,
"state_key": subroom,
"content": {"via": [fed_hostname]},
}
],
)
child_room = {
"room_id": subroom,
"world_readable": True,
}
async def summarize_remote_room_hierarchy(
_self: Any, room: Any, suggested_only: bool
) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]:
return requested_room_entry, {subroom: child_room}, set()
# Add a room to the space which is on another server.
self._add_child(self.space, subspace, self.token)
expected = [
(self.space, [self.room, subspace]),
(self.room, ()),
(subspace, [subroom]),
(subroom, ()),
]
with mock.patch(
"synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy",
new=summarize_remote_room_hierarchy,
):
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self._assert_hierarchy(result, expected)
def test_fed_filtering(self) -> None:
"""
Rooms returned over federation should be properly filtered to only include
rooms the user has access to.
"""
fed_hostname = self.hs.hostname + "2"
subspace = "#subspace:" + fed_hostname
# Create a few rooms which will have different properties.
public_room = "#public:" + fed_hostname
knock_room = "#knock:" + fed_hostname
not_invited_room = "#not_invited:" + fed_hostname
invited_room = "#invited:" + fed_hostname
restricted_room = "#restricted:" + fed_hostname
restricted_accessible_room = "#restricted_accessible:" + fed_hostname
world_readable_room = "#world_readable:" + fed_hostname
joined_room = self.helper.create_room_as(self.user, tok=self.token)
# Poke an invite over federation into the database.
self._poke_fed_invite(invited_room, "@remote:" + fed_hostname)
# Note that these entries are brief, but should contain enough info.
children_rooms = (
(
public_room,
{
"room_id": public_room,
"world_readable": False,
"join_rule": JoinRules.PUBLIC,
},
),
(
knock_room,
{
"room_id": knock_room,
"world_readable": False,
"join_rule": JoinRules.KNOCK,
},
),
(
not_invited_room,
{
"room_id": not_invited_room,
"world_readable": False,
"join_rule": JoinRules.INVITE,
},
),
(
invited_room,
{
"room_id": invited_room,
"world_readable": False,
"join_rule": JoinRules.INVITE,
},
),
(
restricted_room,
{
"room_id": restricted_room,
"world_readable": False,
"join_rule": JoinRules.RESTRICTED,
"allowed_room_ids": [],
},
),
(
restricted_accessible_room,
{
"room_id": restricted_accessible_room,
"world_readable": False,
"join_rule": JoinRules.RESTRICTED,
"allowed_room_ids": [self.room],
},
),
(
world_readable_room,
{
"room_id": world_readable_room,
"world_readable": True,
"join_rule": JoinRules.INVITE,
},
),
(
joined_room,
{
"room_id": joined_room,
"world_readable": False,
"join_rule": JoinRules.INVITE,
},
),
)
subspace_room_entry = _RoomEntry(
subspace,
{
"room_id": subspace,
"world_readable": True,
},
# Place each room in the sub-space.
[
{
"type": EventTypes.SpaceChild,
"room_id": subspace,
"state_key": room_id,
"content": {"via": [fed_hostname]},
}
for room_id, _ in children_rooms
],
)
async def summarize_remote_room_hierarchy(
_self: Any, room: Any, suggested_only: bool
) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]:
return subspace_room_entry, dict(children_rooms), set()
# Add a room to the space which is on another server.
self._add_child(self.space, subspace, self.token)
expected = [
(self.space, [self.room, subspace]),
(self.room, ()),
(
subspace,
[
public_room,
knock_room,
not_invited_room,
invited_room,
restricted_room,
restricted_accessible_room,
world_readable_room,
joined_room,
],
),
(public_room, ()),
(knock_room, ()),
(invited_room, ()),
(restricted_accessible_room, ()),
(world_readable_room, ()),
(joined_room, ()),
]
with mock.patch(
"synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy",
new=summarize_remote_room_hierarchy,
):
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self._assert_hierarchy(result, expected)
def test_fed_invited(self) -> None:
"""
A room which the user was invited to should be included in the response.
This differs from test_fed_filtering in that the room itself is being
queried over federation, instead of it being included as a sub-room of
a space in the response.
"""
fed_hostname = self.hs.hostname + "2"
fed_room = "#subroom:" + fed_hostname
# Poke an invite over federation into the database.
self._poke_fed_invite(fed_room, "@remote:" + fed_hostname)
fed_room_entry = _RoomEntry(
fed_room,
{
"room_id": fed_room,
"world_readable": False,
"join_rule": JoinRules.INVITE,
},
)
async def summarize_remote_room_hierarchy(
_self: Any, room: Any, suggested_only: bool
) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]:
return fed_room_entry, {}, set()
# Add a room to the space which is on another server.
self._add_child(self.space, fed_room, self.token)
expected = [
(self.space, [self.room, fed_room]),
(self.room, ()),
(fed_room, ()),
]
with mock.patch(
"synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy",
new=summarize_remote_room_hierarchy,
):
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self._assert_hierarchy(result, expected)
def test_fed_caching(self) -> None:
"""
Federation `/hierarchy` responses should be cached.
"""
fed_hostname = self.hs.hostname + "2"
fed_subspace = "#space:" + fed_hostname
fed_room = "#room:" + fed_hostname
# Add a room to the space which is on another server.
self._add_child(self.space, fed_subspace, self.token, via=[fed_hostname])
federation_requests = 0
async def get_room_hierarchy(
_self: TransportLayerClient,
destination: str,
room_id: str,
suggested_only: bool,
) -> JsonDict:
nonlocal federation_requests
federation_requests += 1
return {
"room": {
"room_id": fed_subspace,
"world_readable": True,
"room_type": RoomTypes.SPACE,
"children_state": [
{
"type": EventTypes.SpaceChild,
"room_id": fed_subspace,
"state_key": fed_room,
"content": {"via": [fed_hostname]},
},
],
},
"children": [
{
"room_id": fed_room,
"world_readable": True,
},
],
"inaccessible_children": [],
}
expected = [
(self.space, [self.room, fed_subspace]),
(self.room, ()),
(fed_subspace, [fed_room]),
(fed_room, ()),
]
with mock.patch(
"synapse.federation.transport.client.TransportLayerClient.get_room_hierarchy",
new=get_room_hierarchy,
):
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self.assertEqual(federation_requests, 1)
self._assert_hierarchy(result, expected)
# The previous federation response should be reused.
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self.assertEqual(federation_requests, 1)
self._assert_hierarchy(result, expected)
# Expire the response cache
self.reactor.advance(5 * 60 + 1)
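        # (The 5 minutes used here is assumed to match the federation response cache's
        # TTL; advancing just past it should force a fresh federation request.)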
# A new federation request should be made.
result = self.get_success(
self.handler.get_room_hierarchy(create_requester(self.user), self.space)
)
self.assertEqual(federation_requests, 2)
self._assert_hierarchy(result, expected)
class RoomSummaryTestCase(unittest.HomeserverTestCase):
servlets = [
admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.hs = hs
self.handler = self.hs.get_room_summary_handler()
# Create a user.
self.user = self.register_user("user", "pass")
self.token = self.login("user", "pass")
# Create a simple room.
self.room = self.helper.create_room_as(self.user, tok=self.token)
self.helper.send_state(
self.room,
event_type=EventTypes.JoinRules,
body={"join_rule": JoinRules.INVITE},
tok=self.token,
)
def test_own_room(self) -> None:
"""Test a simple room created by the requester."""
result = self.get_success(self.handler.get_room_summary(self.user, self.room))
self.assertEqual(result.get("room_id"), self.room)
def test_visibility(self) -> None:
"""A user not in a private room cannot get its summary."""
user2 = self.register_user("user2", "pass")
token2 = self.login("user2", "pass")
# The user cannot see the room.
self.get_failure(self.handler.get_room_summary(user2, self.room), NotFoundError)
# If the room is made world-readable it should return a result.
self.helper.send_state(
self.room,
event_type=EventTypes.RoomHistoryVisibility,
body={"history_visibility": HistoryVisibility.WORLD_READABLE},
tok=self.token,
)
result = self.get_success(self.handler.get_room_summary(user2, self.room))
self.assertEqual(result.get("room_id"), self.room)
# Make it not world-readable again and confirm it results in an error.
self.helper.send_state(
self.room,
event_type=EventTypes.RoomHistoryVisibility,
body={"history_visibility": HistoryVisibility.JOINED},
tok=self.token,
)
self.get_failure(self.handler.get_room_summary(user2, self.room), NotFoundError)
# If the room is made public it should return a result.
self.helper.send_state(
self.room,
event_type=EventTypes.JoinRules,
body={"join_rule": JoinRules.PUBLIC},
tok=self.token,
)
result = self.get_success(self.handler.get_room_summary(user2, self.room))
self.assertEqual(result.get("room_id"), self.room)
# Join the space, make it invite-only again and results should be returned.
self.helper.join(self.room, user2, tok=token2)
self.helper.send_state(
self.room,
event_type=EventTypes.JoinRules,
body={"join_rule": JoinRules.INVITE},
tok=self.token,
)
result = self.get_success(self.handler.get_room_summary(user2, self.room))
self.assertEqual(result.get("room_id"), self.room)
def test_fed(self) -> None:
"""
Return data over federation and ensure that it is handled properly.
"""
fed_hostname = self.hs.hostname + "2"
fed_room = "#fed_room:" + fed_hostname
requested_room_entry = _RoomEntry(
fed_room,
{"room_id": fed_room, "world_readable": True},
)
async def summarize_remote_room_hierarchy(
_self: Any, room: Any, suggested_only: bool
) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]:
return requested_room_entry, {}, set()
with mock.patch(
"synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy",
new=summarize_remote_room_hierarchy,
):
result = self.get_success(
self.handler.get_room_summary(
self.user, fed_room, remote_room_hosts=[fed_hostname]
)
)
self.assertEqual(result.get("room_id"), fed_room)
|
f36d2897b708c270a81b3c014505acc8ec4086e3
|
bbfd441168758ed5fd9801c8330698e2ca3bbaeb
|
/tf-2-word-embeddings/code/train.py
|
ee91ee6ddb2e238fb63e323e61e2c96ddd56075a
|
[
"Apache-2.0"
] |
permissive
|
aws-samples/amazon-sagemaker-script-mode
|
66d5041bb35e55eea4efe511c83cd21d4add58db
|
54be9ca995bf33d87ccfede258f1c639e07c19fc
|
refs/heads/master
| 2023-08-03T08:49:36.256492
| 2022-03-09T00:18:07
| 2022-03-09T00:18:07
| 169,129,147
| 168
| 103
|
Apache-2.0
| 2023-07-21T04:47:36
| 2019-02-04T18:45:15
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,531
|
py
|
train.py
|
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
from model_def import get_model
def parse_args():
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--num_words', type=int)
parser.add_argument('--word_index_len', type=int)
parser.add_argument('--labels_index_len', type=int)
parser.add_argument('--embedding_dim', type=int)
parser.add_argument('--max_sequence_len', type=int)
# data directories
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
# embedding directory
parser.add_argument('--embedding', type=str, default=os.environ.get('SM_CHANNEL_EMBEDDING'))
# model directory: we will use the default set by SageMaker, /opt/ml/model
parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
return parser.parse_known_args()
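# For quick local runs outside SageMaker, the same arguments can be passed explicitly
# instead of relying on the SM_* environment variables. Every path and value below is
# purely illustrative:
#
#   python train.py --train ./data/train --val ./data/val --embedding ./embedding \
#       --model_dir ./model --num_words 20000 --word_index_len 20000 \
#       --labels_index_len 20 --embedding_dim 100 --max_sequence_len 1000 --epochs 1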
def get_train_data(train_dir):
x_train = np.load(os.path.join(train_dir, 'x_train.npy'))
y_train = np.load(os.path.join(train_dir, 'y_train.npy'))
    print('x train', x_train.shape, 'y train', y_train.shape)
return x_train, y_train
def get_val_data(val_dir):
x_val = np.load(os.path.join(val_dir, 'x_val.npy'))
y_val = np.load(os.path.join(val_dir, 'y_val.npy'))
    print('x val', x_val.shape, 'y val', y_val.shape)
return x_val, y_val
if __name__ == "__main__":
args, _ = parse_args()
x_train, y_train = get_train_data(args.train)
x_val, y_val = get_val_data(args.val)
model = get_model(args.embedding,
args.num_words,
args.word_index_len,
args.labels_index_len,
args.embedding_dim,
args.max_sequence_len)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
model.fit(x_train, y_train,
batch_size=args.batch_size,
epochs=args.epochs,
validation_data=(x_val, y_val))
# create a TensorFlow SavedModel for deployment to a SageMaker endpoint with TensorFlow Serving
model.save(args.model_dir + '/1')
|
18a583917095d5707c20c5f9b4364715849248b7
|
f4ceb49e5c7ff44964364a24838cb8049a9e82b1
|
/pangres/tests/test_core.py
|
5b536d5a97ced6ea4b0da14c4a59fb78c3f7344d
|
[
"Unlicense"
] |
permissive
|
ThibTrip/pangres
|
b71eea437b3d5fd38bd8d6c837f1fc14d68d3552
|
d78296ef6320b89372706fb98251e09ae914f0f7
|
refs/heads/master
| 2023-09-01T08:26:25.080628
| 2023-04-07T19:53:18
| 2023-04-07T19:53:18
| 237,447,411
| 204
| 16
|
Unlicense
| 2023-08-22T21:30:10
| 2020-01-31T14:33:02
|
Python
|
UTF-8
|
Python
| false
| false
| 17,697
|
py
|
test_core.py
|
#!/usr/bin/env python
# coding: utf-8
# +
"""
End to end test similar to the scenario proposed in the docstring
of `pangres.upsert_or_aupsert`.
We will create a table and then do inserts using `update` and then `ignore` for
the `ON CONFLICT` clause.
"""
import pandas as pd
import pytest
from sqlalchemy import create_engine, VARCHAR
from sqlalchemy.exc import OperationalError, ProgrammingError
# local imports
from pangres import aupsert, upsert, HasNoSchemaSystemException
from pangres.examples import _TestsExampleTable
from pangres.tests.conftest import (adrop_schema, adrop_table, adrop_table_between_tests, aselect_table,
drop_schema, drop_table, drop_table_between_tests,
schema_for_testing_creation, select_table,
sync_async_exec_switch, sync_or_async_test, TableNames)
# -
# # Helpers
# ## Test data
# +
# df from which we will create a SQL table
df = _TestsExampleTable.create_example_df(nb_rows=6)
# test that nulls won't create any problems
# avoid nulls in the boolean column though, as this is impractical with pandas in older versions
df.iloc[0, [ix for ix, col in enumerate(df.columns) if col != 'likes_pizza']] = None
# df from which we will upsert_or_aupsert with if_row_exists='update' or 'ignore'
# remove one record from above and add one
# so we know that 1) old records are still there, 2) new ones get added
# 3) we can check whether the update/ignore of existing records worked
df2 = _TestsExampleTable.create_example_df(nb_rows=7)
df2 = df2.iloc[1:]
# -
# ## Expectations
# +
df_after_insert_update = pd.concat(objs=(df.loc[~df.index.isin(df2.index.tolist())], # everything that is not in df2
df2))
df_after_insert_ignore = pd.concat(objs=(df,
df2.loc[~df2.index.isin(df.index.tolist())])) # everything that is not in df
# -
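# To make the expectation concrete (index values below are illustrative, not taken
# from the example dataframes): if both df and df2 contain index "abc", then with
# if_row_exists='update' the table ends up with the row from df2, while with
# if_row_exists='ignore' the original row from df is kept; an index only present
# in df2 is inserted in both cases.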
# # Sync and async variants for tests
#
# (`run_test_foo`|`run_test_foo_async`) -> `test_foo`
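# Rough sketch of that dispatch (the real helper lives in pangres.tests.conftest;
# everything in the body below is an assumption for illustration only):
#
#   def sync_or_async_test(engine, schema, f_async, f_sync, **kwargs):
#       if engine_is_async(engine):                      # hypothetical check
#           run_coroutine(f_async(engine=engine, schema=schema, **kwargs))
#       else:
#           f_sync(engine=engine, schema=schema, **kwargs)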
# ## Upsert without DDL modifications (except `create_table`)
# +
@drop_table_between_tests(table_name=TableNames.END_TO_END)
def run_test_end_to_end(engine, schema, create_table, if_row_exists, df_expected):
# config
table_name = TableNames.END_TO_END
common_kwargs_upsert = dict(if_row_exists=if_row_exists, table_name=table_name)
read_table = lambda: _TestsExampleTable.read_from_db(engine=engine, schema=schema,
table_name=table_name).sort_index()
# 1. create table
upsert(con=engine, schema=schema, df=df, create_table=True, **common_kwargs_upsert)
pd.testing.assert_frame_equal(df, read_table())
# 2. insert update/ignore
upsert(con=engine, schema=schema, df=df2, create_table=create_table, **common_kwargs_upsert)
pd.testing.assert_frame_equal(df_expected, read_table())
@adrop_table_between_tests(table_name=TableNames.END_TO_END)
async def run_test_end_to_end_async(engine, schema, create_table, if_row_exists, df_expected):
# config
table_name = TableNames.END_TO_END
common_kwargs_upsert = dict(if_row_exists=if_row_exists, table_name=table_name)
async def read_table():
temp_df = await _TestsExampleTable.aread_from_db(engine=engine, schema=schema, table_name=table_name)
return temp_df.sort_index()
# 1. create table
await aupsert(con=engine, schema=schema, df=df, create_table=True, **common_kwargs_upsert)
df_db = await read_table()
pd.testing.assert_frame_equal(df, df_db)
# 2. insert update/ignore
await aupsert(con=engine, schema=schema, df=df2, create_table=create_table, **common_kwargs_upsert)
df_db = await read_table()
pd.testing.assert_frame_equal(df_expected, df_db)
# -
# ## Upsert with addition of new columns
# +
@drop_table_between_tests(table_name=TableNames.ADD_NEW_COLUMN)
def run_test_add_new_column(engine, schema):
# config
table_name = TableNames.ADD_NEW_COLUMN
dtype = {'id': VARCHAR(5)} if 'mysql' in engine.dialect.dialect_description else None
df = pd.DataFrame({'id': ['foo']}).set_index('id')
# common kwargs for all the times we use upsert_or_aupsert
common_kwargs = dict(con=engine, schema=schema, table_name=table_name,
if_row_exists='update', dtype=dtype)
# 1. create table
upsert(df=df, **common_kwargs)
# 2. add a new column and repeat upsert_or_aupsert
df['new_column'] = 'bar'
upsert(df=df, add_new_columns=True, **common_kwargs)
# verify content matches
df_db = select_table(engine=engine, schema=schema, table_name=table_name, index_col='id').sort_index()
pd.testing.assert_frame_equal(df, df_db)
@adrop_table_between_tests(table_name=TableNames.ADD_NEW_COLUMN)
async def run_test_add_new_column_async(engine, schema):
# config
table_name = TableNames.ADD_NEW_COLUMN
dtype = {'id': VARCHAR(5)} if 'mysql' in engine.dialect.dialect_description else None
df = pd.DataFrame({'id': ['foo']}).set_index('id')
# common kwargs for all the times we use upsert_or_aupsert
common_kwargs = dict(con=engine, schema=schema, table_name=table_name,
if_row_exists='update', dtype=dtype)
# 1. create table
await aupsert(df=df, **common_kwargs)
# 2. add a new column and repeat upsert_or_aupsert
df['new_column'] = 'bar'
await aupsert(df=df, add_new_columns=True, **common_kwargs)
# verify content matches
df_db = await aselect_table(engine=engine, schema=schema, table_name=table_name, index_col='id')
pd.testing.assert_frame_equal(df, df_db.sort_index())
# -
# ## Upsert with alteration of data type for empty columns
# +
@drop_table_between_tests(table_name=TableNames.CHANGE_EMPTY_COL_TYPE)
def run_test_adapt_column_type(engine, schema):
# skip for sqlite as it does not support such alteration
if 'sqlite' in engine.dialect.dialect_description:
pytest.skip('such column alteration is not possible with SQlite')
# config
table_name = TableNames.CHANGE_EMPTY_COL_TYPE
dtype = {'id': VARCHAR(5)} if 'mysql' in engine.dialect.dialect_description else None
df = pd.DataFrame({'id': ['foo'], 'empty_column': [None]}).set_index('id')
# common kwargs for all the times we use upsert_or_aupsert
common_kwargs = dict(con=engine, schema=schema, df=df, table_name=table_name,
if_row_exists='update', dtype=dtype)
# 1. create table
upsert(**common_kwargs)
# 2. add non string data in empty column and repeat upsert_or_aupsert
df['empty_column'] = 1
upsert(**common_kwargs, adapt_dtype_of_empty_db_columns=True)
@adrop_table_between_tests(table_name=TableNames.CHANGE_EMPTY_COL_TYPE)
async def run_test_adapt_column_type_async(engine, schema):
# skip for sqlite as it does not support such alteration
if 'sqlite' in engine.dialect.dialect_description:
pytest.skip('such column alteration is not possible with SQlite')
# config
table_name = TableNames.CHANGE_EMPTY_COL_TYPE
dtype = {'id': VARCHAR(5)} if 'mysql' in engine.dialect.dialect_description else None
df = pd.DataFrame({'id': ['foo'], 'empty_column': [None]}).set_index('id')
# common kwargs for all the times we use upsert_or_aupsert
common_kwargs = dict(con=engine, schema=schema, df=df, table_name=table_name,
if_row_exists='update', dtype=dtype)
# 1. create table
await aupsert(**common_kwargs)
# 2. add non string data in empty column and repeat upsert_or_aupsert
df['empty_column'] = 1
await aupsert(**common_kwargs, adapt_dtype_of_empty_db_columns=True)
# -
# ## Upsert with `create_schema=True` but `schema=None`
# +
@drop_table_between_tests(table_name=TableNames.CREATE_SCHEMA_NONE)
def run_test_create_schema_none(engine, schema):
df = pd.DataFrame({'id': [0]}).set_index('id')
upsert(con=engine, schema=None, df=df, if_row_exists='update', create_schema=True,
table_name=TableNames.CREATE_SCHEMA_NONE, create_table=True)
@adrop_table_between_tests(table_name=TableNames.CREATE_SCHEMA_NONE)
async def run_test_create_schema_none_async(engine, schema):
df = pd.DataFrame({'id': [0]}).set_index('id')
await aupsert(con=engine, schema=None, df=df, if_row_exists='update', create_schema=True,
table_name=TableNames.CREATE_SCHEMA_NONE, create_table=True)
# -
# ## Upsert with `create_schema=True` but `schema` is not None
# +
@drop_table_between_tests(table_name=TableNames.CREATE_SCHEMA_NOT_NONE)
def run_test_create_schema_not_none(engine, schema):
# local helpers
is_postgres = 'postgres' in engine.dialect.dialect_description
# overwrite schema
schema = schema_for_testing_creation
# config
df = pd.DataFrame({'id': [0]}).set_index('id')
table_name = TableNames.CREATE_SCHEMA_NOT_NONE
    # drop table before test (could not get my decorator to work with another schema
    # when having an optional arg schema=None due to variable scope problems)
if is_postgres:
drop_table(engine=engine, schema=schema, table_name=table_name)
drop_schema(engine=engine, schema=schema)
try:
upsert(con=engine, schema=schema, df=df, if_row_exists='update', create_schema=True,
table_name=table_name, create_table=True)
if not is_postgres: # pragma: no cover
raise AssertionError('Expected `upsert` to fail when trying to create a schema '
'with another database than postgres')
except Exception as e:
# for postgres this should have worked
if is_postgres: # pragma: no cover
raise e
else:
assert isinstance(e, HasNoSchemaSystemException)
finally:
# drop table and schema after test
if is_postgres:
drop_table(engine=engine, schema=schema, table_name=table_name)
drop_schema(engine=engine, schema=schema)
@adrop_table_between_tests(table_name=TableNames.CREATE_SCHEMA_NOT_NONE)
async def run_test_create_schema_not_none_async(engine, schema):
# local helpers
is_postgres = 'postgres' in engine.dialect.dialect_description
# overwrite schema
schema = schema_for_testing_creation
# config
df = pd.DataFrame({'id': [0]}).set_index('id')
table_name = TableNames.CREATE_SCHEMA_NOT_NONE
    # drop table before test (could not get my decorator to work with another schema
    # when having an optional arg schema=None due to variable scope problems)
if is_postgres:
await adrop_table(engine=engine, schema=schema, table_name=table_name)
await adrop_schema(engine=engine, schema=schema)
try:
await aupsert(con=engine, schema=schema, df=df, if_row_exists='update', create_schema=True,
table_name=table_name, create_table=True)
if not is_postgres: # pragma: no cover
raise AssertionError('Expected `upsert` to fail when trying to create a schema '
'with another database than postgres')
except Exception as e:
# for postgres this should have worked
if is_postgres: # pragma: no cover
raise e
else:
assert isinstance(e, HasNoSchemaSystemException)
finally:
# drop table and schema after test
if is_postgres:
await adrop_table(engine=engine, schema=schema, table_name=table_name)
await adrop_schema(engine=engine, schema=schema)
# -
# ## Test error on upsert when table does not exist (with `create_table=False`)
#
# We do not create any table here
# +
def run_test_insert_missing_table(engine, schema):
"""
    Check if an error is raised when trying to insert into a missing table
and `create_table` is False.
"""
df = pd.DataFrame({'id': [0]}).set_index('id')
with pytest.raises((OperationalError, ProgrammingError)) as excinfo:
upsert(con=engine, schema=schema, df=df, table_name=TableNames.NO_TABLE,
if_row_exists='update', create_table=False)
assert any(s in str(excinfo.value) for s in ('no such table', 'does not exist', "doesn't exist"))
async def run_test_insert_missing_table_async(engine, schema):
"""
    Check if an error is raised when trying to insert into a missing table
and `create_table` is False.
"""
df = pd.DataFrame({'id': [0]}).set_index('id')
with pytest.raises((OperationalError, ProgrammingError)) as excinfo:
await aupsert(con=engine, schema=schema, df=df, table_name=TableNames.NO_TABLE,
if_row_exists='update', create_table=False)
assert any(s in str(excinfo.value) for s in ('no such table', 'does not exist', "doesn't exist"))
# -
# ## Test if MySQL does not automatically create an autoincremented PK when giving it integers
#
# See [issue 56](https://github.com/ThibTrip/pangres/issues/56)
# +
@drop_table_between_tests(table_name=TableNames.PK_MYSQL)
def run_test_mysql_pk_not_auto_incremented(engine, schema):
if 'mysql' not in engine.dialect.dialect_description:
pytest.skip('This test is only relevant for MySQL')
table_name = TableNames.PK_MYSQL
# upsert first df using pangres which creates the table automatically
df1 = pd.DataFrame({'id': [0, 1], 'name': ['foo', 'bar']}).set_index('id')
upsert(con=engine, df=df1, table_name=table_name, if_row_exists='update')
# upsert second df
df2 = pd.DataFrame({'id': [100, 200], 'name': ['baz', 'qux']}).set_index('id')
upsert(con=engine, df=df2, table_name=table_name, if_row_exists='update')
# read df back
df_db = select_table(engine=engine, schema=schema, table_name=table_name, index_col='id')
# check mysql got that correctly
pd.testing.assert_frame_equal(df_db.sort_index(), pd.concat((df1, df2)).sort_index())
@adrop_table_between_tests(table_name=TableNames.PK_MYSQL)
async def run_test_mysql_pk_not_auto_incremented_async(engine, schema):
if 'mysql' not in engine.dialect.dialect_description:
pytest.skip('This test is only relevant for MySQL')
table_name = TableNames.PK_MYSQL
# upsert first df using pangres which creates the table automatically
df1 = pd.DataFrame({'id': [0, 1], 'name': ['foo', 'bar']}).set_index('id')
await aupsert(con=engine, df=df1, table_name=table_name, if_row_exists='update')
# upsert second df
df2 = pd.DataFrame({'id': [100, 200], 'name': ['baz', 'qux']}).set_index('id')
await aupsert(con=engine, df=df2, table_name=table_name, if_row_exists='update')
# read df back
df_db = await aselect_table(engine=engine, schema=schema, table_name=table_name, index_col='id')
# check mysql got that correctly
pd.testing.assert_frame_equal(df_db.sort_index(), pd.concat((df1, df2)).sort_index())
# -
# # Actual tests
# +
# after the table is created with a first `upsert_or_aupsert`
# using `create_table=False` or `create_table=True` should both work
@pytest.mark.parametrize('create_table', [False, True], ids=['create_table_false', 'create_table_true'])
@pytest.mark.parametrize('if_row_exists, df_expected', [['update', df_after_insert_update],
['ignore', df_after_insert_ignore]],
ids=['update', 'ignore'])
def test_end_to_end(engine, schema, create_table, if_row_exists, df_expected):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_end_to_end_async,
f_sync=run_test_end_to_end,
create_table=create_table,
if_row_exists=if_row_exists,
df_expected=df_expected)
@pytest.mark.parametrize('use_async', [False, True], ids=['upsert', 'aupsert'])
def test_bad_value_if_row_exists(_, use_async):
df = pd.DataFrame({'id': [0]}).set_index('id')
engine = create_engine('sqlite:///')
    upsert_func = aupsert if use_async else upsert
upsert_kwargs = dict(con=engine, df=df, table_name=TableNames.NO_TABLE, if_row_exists='test')
with pytest.raises(ValueError) as excinfo:
sync_async_exec_switch(upsert_func, **upsert_kwargs)
assert 'must be "ignore" or "update"' in str(excinfo.value)
def test_add_new_column(engine, schema):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_add_new_column_async,
f_sync=run_test_add_new_column)
def test_adapt_column_type(engine, schema):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_adapt_column_type_async,
f_sync=run_test_adapt_column_type)
def test_insert_missing_table(engine, schema):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_insert_missing_table_async,
f_sync=run_test_insert_missing_table)
def test_create_schema_none(engine, schema):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_create_schema_none_async,
f_sync=run_test_create_schema_none)
def test_create_schema_not_none(engine, schema):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_create_schema_not_none_async,
f_sync=run_test_create_schema_not_none)
def test_mysql_pk_not_auto_incremented(engine, schema):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_mysql_pk_not_auto_incremented_async,
f_sync=run_test_mysql_pk_not_auto_incremented)
|
e844956af17a1460b381deea397832b2e54e1ba1
|
35d3be219fef064e3049e5d3d3fb73b1928cd837
|
/bitcoinetl/mappers/join_split_mapper.py
|
09f29e304e4c2da425471c23f8fc5c6312c44a18
|
[
"MIT"
] |
permissive
|
blockchain-etl/bitcoin-etl
|
118f51c9a4b2d09e0bd8a756395e5a3ce811bd26
|
b868c93cd030c086cbd469f71bfd94799094cbf6
|
refs/heads/master
| 2023-08-31T04:19:25.324900
| 2022-04-23T19:27:54
| 2022-04-23T19:27:54
| 148,597,387
| 365
| 123
|
MIT
| 2023-08-17T05:09:49
| 2018-09-13T07:13:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
join_split_mapper.py
|
# MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bitcoinetl.btc_utils import bitcoin_to_satoshi
from bitcoinetl.domain.join_split import BtcJoinSplit
class BtcJoinSplitMapper(object):
def vjoinsplit_to_join_splits(self, vjoinsplit):
join_splits = []
index = 0
for item in (vjoinsplit or []):
join_split = self.json_dict_to_join_split(item)
join_split.index = index
index = index + 1
join_splits.append(join_split)
return join_splits
def json_dict_to_join_split(self, json_dict):
join_split = BtcJoinSplit()
join_split.public_input_value = bitcoin_to_satoshi(json_dict.get('vpub_new'))
join_split.public_output_value = bitcoin_to_satoshi(json_dict.get('vpub_old'))
return join_split
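# Illustrative usage (the RPC field values are made up):
#   mapper = BtcJoinSplitMapper()
#   join_splits = mapper.vjoinsplit_to_join_splits([
#       {'vpub_old': 0.5, 'vpub_new': 0.0},
#       {'vpub_old': 0.0, 'vpub_new': 0.25},
#   ])
#   # -> two BtcJoinSplit objects with index 0 and 1, with both vpub amounts
#   #    converted to satoshis by bitcoin_to_satoshi.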
|
cc8fd2d0401d0341ecc2e3ee5ab1f6fe79c063cc
|
f66ac77663f2067a05c419f27dbd18e11e05be11
|
/packages/augur-core/tests/libraries/token/test_erc20_proxy.py
|
b433a6ba37e79f2cb4d58ec76ba541c4fb08a2a5
|
[
"MIT",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-only"
] |
permissive
|
AugurProject/augur
|
a31f0be384be894f933c901ad737b04ea2713c74
|
bd13a797016b373834e9414096c6086f35aa628f
|
refs/heads/dev
| 2023-09-01T23:17:45.887550
| 2021-11-05T17:40:24
| 2021-11-05T17:40:24
| 142,967,010
| 476
| 199
|
MIT
| 2023-07-23T15:15:30
| 2018-07-31T05:37:46
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 6,004
|
py
|
test_erc20_proxy.py
|
#!/usr/bin/env python
from eth_tester.exceptions import TransactionFailed
from pytest import fixture, raises
from utils import AssertLog, stringToBytes, BuyWithCash
INVALID = 0
NO = 1
YES = 2
NULL_ADDRESS = "0x0000000000000000000000000000000000000000"
@fixture(scope='session')
def testerSnapshot(sessionFixture):
return sessionFixture.createSnapshot()
@fixture
def nexus(sessionFixture, shareToken):
nexus = sessionFixture.contracts['ERC20Proxy1155Nexus']
shareToken.setApprovalForAll(nexus.address, True)
return nexus
@fixture
def tokenId(sessionFixture, shareToken, market):
return shareToken.getTokenId(market.address, INVALID)
@fixture
def erc20(sessionFixture, nexus, tokenId):
erc20Address = nexus.newERC20(tokenId)
return sessionFixture.applySignature("ERC20Proxy1155", erc20Address)
@fixture
def account0(sessionFixture):
return sessionFixture.accounts[0]
@fixture
def account1(sessionFixture):
return sessionFixture.accounts[1]
@fixture
def account2(sessionFixture):
return sessionFixture.accounts[2]
def test_proxy_required(testerSnapshot, nexus, erc20, account0, account1):
with raises(TransactionFailed):
nexus.transfer(erc20.address, account0, account1, 15)
with raises(TransactionFailed):
nexus.transferFrom(erc20.address, account0, account0, account1, 15)
with raises(TransactionFailed):
nexus.approve(erc20.address, account0, account1, 15)
def test_base_state(testerSnapshot, tokenId, erc20, nexus, account0, account1):
assert erc20.tokenId() == tokenId
assert nexus.getProxy(tokenId) == erc20.address
assert erc20.totalSupply() == 0
assert erc20.balanceOf(account0) == 0
assert nexus.allowance(erc20.address, account0, account1) == 0
def test_allowance_and_logs(testerSnapshot, cash, shareToken, market, account0, account1, account2, erc20):
shares = 2
cost = shares * market.getNumTicks()
with BuyWithCash(cash, cost, account0, "complete set buy"):
shareToken.publicBuyCompleteSets(market.address, shares)
assert erc20.allowance(account0, account1) == 0
with raises(TransactionFailed):
erc20.transferFrom(account0, account2, shares - 1, sender=account1)
erc20.approve(account1, shares - 1)
assert erc20.getLogs("Approval")[0].get("args").__dict__ == {
'owner': account0,
'spender': account1,
'value': shares - 1,
}
assert erc20.allowance(account0, account1) == shares - 1
# try to send more shares than allowed
with raises(TransactionFailed):
erc20.transferFrom(account0, account2, shares, sender=account1) # note only shares-1 have been approved
# send max approved shares
erc20.transferFrom(account0, account2, shares - 1, sender=account1)
assert erc20.getLogs("Transfer")[0].get("args").__dict__ == {
'from': account0,
'to': account2,
'value': shares - 1,
}
assert erc20.allowance(account0, account1) == 0
# should have no more allowed shares to trade
with raises(TransactionFailed):
erc20.transferFrom(account0, account2, 1, sender=account1)
def test_supply(testerSnapshot, cash, shareToken, market, nexus, erc20, account0):
shareToken.setApprovalForAll(nexus.address, True)
assert erc20.totalSupply() == 0
assert erc20.balanceOf(account0) == 0
shares = 1
cost = shares * market.getNumTicks()
with BuyWithCash(cash, cost, account0, "complete set buy"):
shareToken.publicBuyCompleteSets(market.address, shares)
assert erc20.totalSupply() == 1
assert erc20.balanceOf(account0) == 1
def test_no_overwrite_of_proxy(testerSnapshot, nexus, erc20, tokenId):
# was already created in the fixture
with raises(TransactionFailed):
nexus.newERC20(tokenId)
# can still mint new proxies though
fakeTokenId = 42
fakeErc = nexus.newERC20(fakeTokenId)
assert nexus.getProxy(tokenId) == erc20.address
assert nexus.getProxy(fakeTokenId) == fakeErc
unregisteredTokenId = 9001
assert nexus.getProxy(unregisteredTokenId) == NULL_ADDRESS
def test_can_always_get_proxy_address(testerSnapshot, nexus):
uncreatedTokenId = 9001
assert nexus.getProxyAddress(uncreatedTokenId) == "0x3CCFf0dd2eEF0Ba49a6DdCfDA9DD24d560A60e85"
def test_malicious_proxy(testerSnapshot, fixture, nexus, market, cash, shareToken, account0, account1, account2, tokenId, erc20):
maliciousProxy = fixture.upload("../../../src/contracts/trading/erc20proxy1155/ERC20Proxy1155.sol")
maliciousProxy.initialize(nexus.address, tokenId)
benignProxy = erc20
shares = 2
cost = shares * market.getNumTicks()
with BuyWithCash(cash, cost, account0, "complete set buy"):
shareToken.publicBuyCompleteSets(market.address, shares)
with raises(TransactionFailed):
maliciousProxy.transfer(account1, shares - 1)
benignProxy.transfer(account1, shares - 1)
with raises(TransactionFailed):
maliciousProxy.approve(account1, shares - 1)
benignProxy.approve(account1, shares - 1)
with raises(TransactionFailed):
maliciousProxy.transferFrom(account0, account2, shares - 1, sender=account1)
benignProxy.transferFrom(account0, account2, shares - 1, sender=account1)
def test_re_init(testerSnapshot, erc20):
with raises(TransactionFailed):
erc20.initialize(NULL_ADDRESS, 40000)
def test_bulk(testerSnapshot, sessionFixture, nexus):
erc20s = nexus.newERC20s([5, 7, 21])
assert len(erc20s) == 3
assert len(erc20s[0]) == 42 # "0x" + 40 hexes
assert len(erc20s[1]) == 42 # "0x" + 40 hexes
assert len(erc20s[2]) == 42 # "0x" + 40 hexes
erc20_5 = sessionFixture.applySignature("ERC20Proxy1155", erc20s[0])
erc20_7 = sessionFixture.applySignature("ERC20Proxy1155", erc20s[1])
erc20_21 = sessionFixture.applySignature("ERC20Proxy1155", erc20s[2])
assert erc20_5.tokenId() == 5
assert erc20_7.tokenId() == 7
assert erc20_21.tokenId() == 21
|
ee9812508022d4e95fcbd5c8e3e3ef5cb8758730
|
c7c1830f23d99806c3532b9a929c08ca0736ad58
|
/chaospy/distributions/sampler/sequences/grid.py
|
1cc9214bdc687279f974c185bb27b250f02b1d7c
|
[
"MIT"
] |
permissive
|
jonathf/chaospy
|
8a92df59fd83e39bb64921586e7971c03791eea4
|
b5959a24e0bd9b214c292485919d7ce58795f5dc
|
refs/heads/master
| 2023-08-15T16:04:55.764743
| 2023-06-03T11:35:53
| 2023-06-03T11:35:53
| 22,848,758
| 405
| 87
|
MIT
| 2023-05-18T11:52:46
| 2014-08-11T17:54:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,200
|
py
|
grid.py
|
"""
Generate samples from a regular grid.
Example usage
-------------
Basic usage::
>>> distribution = chaospy.Uniform(0, 1)
>>> samples = distribution.sample(2, rule="grid")
>>> samples.round(4)
array([0.3333, 0.6667])
>>> samples = distribution.sample(5, rule="grid")
>>> samples.round(4)
array([0.1667, 0.3333, 0.5 , 0.6667, 0.8333])
Certain orders are nested::
>>> samples = distribution.sample(3, rule="grid")
>>> samples.round(4)
array([0.25, 0.5 , 0.75])
>>> samples = distribution.sample(7, rule="grid")
>>> samples.round(4)
array([0.125, 0.25 , 0.375, 0.5 , 0.625, 0.75 , 0.875])
Create nested samples directly with the dedicated function::
>>> samples = distribution.sample(2, rule="nested_grid")
>>> samples.round(4)
array([0.25, 0.5 , 0.75])
>>> samples = distribution.sample(3, rule="nested_grid")
>>> samples.round(4)
array([0.125, 0.25 , 0.375, 0.5 , 0.625, 0.75 , 0.875])
Multivariate usage::
>>> distribution = chaospy.J(chaospy.Uniform(0, 1), chaospy.Uniform(0, 1))
>>> samples = distribution.sample(2, rule="grid")
>>> samples.round(4)
array([[0.3333, 0.3333, 0.6667, 0.6667],
[0.3333, 0.6667, 0.3333, 0.6667]])
"""
import numpy
import chaospy
from chaospy.quadrature import utils
def create_grid_samples(order, dim=1):
"""
Create samples from a regular grid.
Args:
order (int):
The order of the grid. Defines the number of samples.
dim (int):
The number of dimensions in the grid
Returns (numpy.ndarray):
        Regular grid with ``shape == (dim, order**dim)``.
"""
x_data = numpy.arange(1, order + 1) / (order + 1.0)
x_data = utils.combine([x_data] * dim)
return x_data.T
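# Worked example of the helper above (not a doctest): for order=3 the 1-D grid is
# numpy.arange(1, 4) / 4.0 == [0.25, 0.5, 0.75]; with dim=2, utils.combine builds the
# cartesian product, so create_grid_samples(3, dim=2) returns an array of shape (2, 9)
# covering {0.25, 0.5, 0.75} x {0.25, 0.5, 0.75}.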
def create_nested_grid_samples(order, dim=1):
"""
Create samples from a nested grid.
Args:
order (int):
The order of the grid. Defines the number of samples.
dim (int):
The number of dimensions in the grid
Returns (numpy.ndarray):
        Regular grid with ``shape == (dim, (2**order-1)**dim)``.
"""
return create_grid_samples(order=2**order - 1, dim=dim)
|
38b2a1de2c2ec5ed1f31f2406b8554c3bb84152e
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/makeFunctionTopLevel/methodAttributeWrites.py
|
a78728affba8cd4b6eee15bae54003ce5016e32d
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
methodAttributeWrites.py
|
class C:
def __init__(self):
self.attr = True
def me<caret>thod(self):
self.attr = False
|
ad4024ee2bb90503e935c7ec0deac0ac225c0cf3
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/security/azure-mgmt-security/azure/mgmt/security/v2022_07_01_preview/models/_security_center_enums.py
|
b9e3ba7c5cc09a9d5783af9f495a02740a264d8f
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
_security_center_enums.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class ApplicationConditionOperator(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The application Condition's Operator, for example Contains for id or In for list of possible
IDs, see examples.
"""
CONTAINS = "Contains"
"""Checks that the string value of the data defined in Property contains the given value"""
EQUALS = "Equals"
"""Checks that the string value of the data defined in Property equals the given value"""
IN = "In"
"""Checks that the string value of the data defined in Property equals any of the given values
#: (exact fit)"""
IN_ENUM = "In"
"""Checks that the string value of the data defined in Property equals any of the given values
#: (exact fit)"""
class ApplicationSourceResourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The application source, what it affects, e.g. Assessments."""
ASSESSMENTS = "Assessments"
"""The source of the application is assessments"""
|
a4bfb3d3225f0fdc3b5e3696443a11e9603c3257
|
6037f170d11a1a18bf61b55fa1cc5bc202c4f4f7
|
/python/examples/debug_info.py
|
4f4328d3de518e6f031f8893e51ceade2acf2509
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Vector35/binaryninja-api
|
70da17c1f421f3911868e87250a10399e2df55d9
|
3c4cf9b06f81e6a2f614ce373a72fa68e6e1a4c0
|
refs/heads/dev
| 2023-09-05T01:12:32.981951
| 2023-09-02T05:46:31
| 2023-09-02T12:45:26
| 45,279,022
| 786
| 221
|
MIT
| 2023-09-08T19:00:06
| 2015-10-30T22:07:25
|
C++
|
UTF-8
|
Python
| false
| false
| 10,115
|
py
|
debug_info.py
|
#!/usr/bin/env python3
# If you're here, you're likely looking for boilerplate code. Here it is:
# ```
# import binaryninja as bn
#
# def is_valid(bv: bn.binaryview.BinaryView):
# return bv.view_type == "Raw"
#
# def parse_info(debug_info: bn.debuginfo.DebugInfo, bv: bn.binaryview.BinaryView):
# debug_info.add_type("name", bn.types.Type.int(4, True))
#
# debug_info.add_data_variable(0x1234, bn.types.Type.int(4, True), "name")
# debug_info.add_data_variable(0x4321, bn.types.Type.int(4, True)) # Names are optional
#
# # Just provide the information you can; we can't create the function without an address, but we'll
# # figure out what we can and you can query this info later when you have a better idea of things
# function_info = bn.debuginfo.DebugFunctionInfo(0xdead1337, "short_name", "full_name", "raw_name", bn.types.Type.int(4, False), [])
# debug_info.add_function(function_info)
#
# bn.debuginfo.DebugInfoParser.register("debug info parser", is_valid, parse_info)
# ```
# If you're interested in applying debug info to existing BNDBs or otherwise manipulating debug info more directly, consider:
# ```
# valid_parsers = bn.debuginfo.DebugInfoParser.get_parsers_for_view(bv)
# parser = bn.debuginfo.DebugInfoParser[name]
# debug_info = parser.parse_debug_info(bv)
# bv.apply_debug_info(debug_info)
# ```
# The rest of this file serves as a test and example of implementing debug info parsers, and the resultant debug info.
#
# All that is required is to provide functions similar to "is_valid" and "parse_info" below, and call
# `binaryninja.debuginfo.DebugInfoParser.register` with a name for your parser; your parser will be made
# available for all valid binary views, with the ability to parse and apply debug info to existing BNDBs.
#
# For the purposes of this example, the following test program was compiled and the symbol `__elf_interp`
# overwritten to provide some magic for us to key on. This example should prove sufficient to
# demonstrate the capabilities of a debug info parser: providing function prototypes, local variables,
# data variables, and new types. It also highlights some limitations of BN at the time of writing, which
# should be fixed (see github.com/Vector35/binaryninja-api/issues/2399).
# ```
# #include <stdint.h>
# #include <stdbool.h>
#
# struct test_type_1 {
# int a;
# char b[4];
# uint64_t c;
# bool d;
# } test_var_1;
#
# struct test_type_2 {
# struct test_type_1 a;
# struct test_type_1* b;
# struct test_type_2* c;
# };
#
# int test_var_2 = 0x1232;
# const int test_var_3 = 0x1233;
# static int test_var_4 = 0x1234;
#
# void no_return_type_no_parameters() { }
#
# bool used_parameter(bool value)
# {
# return !value;
# }
#
# int unused_parameters(bool value_1, int value_2, char* value_3)
# {
# return 8*16-12/32+7|13;
# }
#
# int used_and_unused_parameters_1(int value_1, int value_2, char* value_3, bool value_4)
# {
# return value_1 + value_2;
# }
#
# uint8_t used_and_unused_parameters_2(bool value_1, uint8_t value_2, char* value_3, uint8_t value_4, char value_5)
# {
# return value_2 + value_4;
# }
#
# void local_parameters(bool value_1, uint8_t value_2, char* value_3, uint8_t value_4, char value_5)
# {
# char local_var_1 = value_1 ? value_3[15] : value_5;
# uint8_t local_var_2 = value_2 + 25;
# }
#
# int main()
# {
# int a = 0b01010101;
# int b = 0b10101010;
# return ~(a | b | test_var_2);
# }
# ```
import binaryninja as bn
import os
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_debug_info")
# Some setup code not just for informative printing
print = print
if __name__ != "__main__":
print = bn.log_error
def pretty_print_add_data_variable(
debug_info: bn.debuginfo.DebugInfo, address: int, t: bn.types.Type, name: str = None
) -> None:
print(f" Adding data variable of type `{t}` at {hex(address)} : {debug_info.add_data_variable(address, t, name)}")
def pretty_print_add_function(
debug_info: bn.debuginfo.DebugInfo, address: int, short_name: str = None, full_name: str = None, raw_name: str = None,
return_type=None, parameters=None
) -> None:
function_info = bn.debuginfo.DebugFunctionInfo(address, short_name, full_name, raw_name, return_type, parameters)
if parameters is not None:
print(
f" Adding function `{return_type} {short_name}({', '.join(f'{t} {name}' for name, t in parameters)})` at {hex(address)} : {debug_info.add_function(function_info)}"
)
else:
print(
f" Adding function `{return_type} {short_name}()` at {hex(address)} : {debug_info.add_function(function_info)}"
)
# The beginning of the actual debug info plugin
def is_valid(bv: bn.binaryview.BinaryView):
sym = bv.get_symbol_by_raw_name("__elf_interp")
if sym is None:
return False
else:
var = bv.get_data_var_at(sym.address)
return b"test_debug_info_parsing" == bv.read(sym.address, var.type.width - 1)
def parse_info(debug_info: bn.debuginfo.DebugInfo, bv: bn.binaryview.BinaryView):
print("Adding types")
types = []
for name, t in bv.parse_types_from_string(
"""
struct test_type_1 {
int a;
char b[4];
uint64_t c;
bool d;
};
struct test_type_2 {
struct test_type_1 a;
struct test_type_1* b;
struct test_type_2* c;
};"""
).types.items():
print(f" Adding type \"{name}\" `{t}` : {debug_info.add_type(str(name), t)}")
types.append(t)
print("Adding data variables")
pretty_print_add_data_variable(debug_info, 0x4030, types[0], "test_var_1")
pretty_print_add_data_variable(debug_info, 0x4010, bn.types.Type.int(4, True), "test_var_2")
# Names are optional
pretty_print_add_data_variable(debug_info, 0x4014, bn.types.Type.int(4, True))
t = bn.types.Type.int(4, True)
t.const = True
pretty_print_add_data_variable(debug_info, 0x2004, t, "test_var_3")
print("Adding functions")
char_star = bv.parse_type_string("char*")[0]
pretty_print_add_function(debug_info, 0x1129, "no_return_type_no_parameters", None, None, bn.types.Type.void(), None)
pretty_print_add_function(
debug_info, 0x1134, "used_parameter", None, None, bn.types.Type.bool(), [("value", bn.types.Type.bool())]
)
pretty_print_add_function(
debug_info, 0x1155, "unused_parameters", None, None, bn.types.Type.int(4, True),
[("value_1", bn.types.Type.bool()), ("value_2", bn.types.Type.int(4, True)), ("value_3", char_star)]
)
pretty_print_add_function(
debug_info, 0x1170, "used_and_unused_parameters_1", None, None, bn.types.Type.int(4, True),
[("value_1", bn.types.Type.int(4, True)), ("value_2", bn.types.Type.int(4, True)), ("value_3", char_star),
("value_4", bn.types.Type.bool())]
)
pretty_print_add_function(
debug_info, 0x1191, "used_and_unused_parameters_2", None, None, bn.types.Type.int(1, False),
[("value_1", bn.types.Type.bool()), ("value_2", bn.types.Type.int(1, False)), ("value_3", char_star),
("value_4", bn.types.Type.int(1, False)), ("value_5", bn.types.Type.char())]
)
pretty_print_add_function(
debug_info, 0x11c0, "local_parameters", None, None, bn.types.Type.void(), [("value_1", bn.types.Type.bool()),
("value_2", bn.types.Type.int(1, False)),
("value_3", char_star),
("value_4", bn.types.Type.int(1, False)),
("value_5", bn.types.Type.char())]
)
parser = bn.debuginfo.DebugInfoParser.register("test debug info parser", is_valid, parse_info)
print(f"Registered parser: {parser.name}")
# The above is all that is needed for a DebugInfo plugin
# The below serves to test the correctness of (the Python bindings' implementation of) debug info parsers' functionality.
bn.debuginfo.DebugInfoParser.register("dummy extra debug parser 1", lambda bv: False, lambda di, bv: None)
bn.debuginfo.DebugInfoParser.register(
"dummy extra debug parser 2", lambda bv: bv.view_type != "Raw", lambda di, bv: None
)
# Test fetching parser list and fetching by name
print(f"Availible parsers: {len(list(bn.debuginfo.DebugInfoParser))}")
for p in bn.debuginfo.DebugInfoParser:
if p == parser:
print(f" {bn.debuginfo.DebugInfoParser[p.name].name} (the one we just registered)")
else:
print(f" {bn.debuginfo.DebugInfoParser[p.name].name}")
# Test calling our `is_valid` callback
bv = bn.load(filename, options={"analysis.debugInfo.internal": False})
if parser.is_valid_for_view(bv):
print("Parser is valid")
else:
print("Parser is NOT valid!")
quit()
# Test getting list of valid parsers, and DebugInfoParser's repr
print("")
for p in bn.debuginfo.DebugInfoParser.get_parsers_for_view(bv):
print(f"`{p.name}` is valid for `{bv}`")
print("")
# Test calling our `parse_info` callback
debug_info = parser.parse_debug_info(bv)
# debug_info = bv.debug_info
print("\nEach of the following pairs of prints should be the same\n")
print("All types:")
for name, t in debug_info.types:
print(f" \"{name}\": `{t}`")
print("Types from parser:")
for name, t in debug_info.types_from_parser(parser.name):
print(f" \"{name}\": `{t}`")
print("")
print("All functions:")
for func in debug_info.functions:
print(f" {func}")
print("Functions from parser:")
for func in debug_info.functions_from_parser(parser.name):
print(f" {func}")
print("")
print("All data variables:")
for data_var in debug_info.data_variables:
print(f" {data_var}")
print("Data variables from parser:")
for data_var in debug_info.data_variables_from_parser(parser.name):
print(f" {data_var}")
print("Appling debug info!")
bv.apply_debug_info(debug_info)
bv.update_analysis_and_wait()
# Checking applied debug info
print("")
print("Types:")
for name, t in debug_info.types:
print(f" {bv.get_type_by_name(name)}")
print("")
print("Functions:")
for func in debug_info.functions:
print(f" {bv.get_function_at(func.address)}")
print("")
print("Data variables:")
for data_var in debug_info.data_variables:
print(f" {bv.get_data_var_at(data_var.address)}")
|
99ca5d12bc0b9c22ea2766b1475a491dfdc50e33
|
38e5c18fdb3da2fd51d6ffcdbd30fca1f4197220
|
/events/migrations/0016_make_searchable_uri_unique.py
|
9cce58c7fc584406c0e67d539a6ed2b7adddaefc
|
[
"BSD-2-Clause"
] |
permissive
|
GetTogetherComm/GetTogether
|
3472c00e94c25930bb5f854bdf5ddf6f0b25fe70
|
6708944bbcecb6d3d1467b096b2d72e991583d51
|
refs/heads/master
| 2023-08-20T17:57:30.082021
| 2022-04-18T22:22:54
| 2022-04-18T22:22:54
| 115,438,321
| 462
| 106
|
BSD-2-Clause
| 2023-02-15T18:23:18
| 2017-12-26T16:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 537
|
py
|
0016_make_searchable_uri_unique.py
|
# Generated by Django 2.0 on 2018-03-11 06:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("events", "0015_populate_searchable_uri")]
operations = [
migrations.AlterField(
model_name="searchable",
name="event_uri",
field=models.CharField(max_length=256, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name="searchable", name="event_url", field=models.URLField()
),
]
|
da19ea4ea943d10c0be675d3e2704854cf16ebf9
|
0aa0c78a6fcea85cf0ccc19c9b5eed25b1f71e18
|
/storm_analysis/jupyter_examples/psf_images.py
|
54af61fa714b69dc547a37f2e1ba6e8b852defc1
|
[] |
no_license
|
ZhuangLab/storm-analysis
|
5f61c3cb63d140fed43e64b1db0865dc0cfb2f62
|
26e4f8038180c3cf29909ed126daa9046f7cd8fc
|
refs/heads/master
| 2023-05-02T22:35:39.392231
| 2023-04-22T17:55:35
| 2023-04-22T17:55:35
| 9,655,861
| 102
| 81
| null | 2020-05-28T17:47:43
| 2013-04-24T19:25:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,867
|
py
|
psf_images.py
|
#!/usr/bin/env python
"""
Make pictures of a PSF.
Hazen 04/18
"""
import pickle
import matplotlib
import matplotlib.pyplot as pyplot
def psfImages(psf_filename, verbose = True, sx = 12, sy = 4):
with open(psf_filename, 'rb') as fp:
psf_data = pickle.load(fp)
psf = psf_data["psf"]
if verbose:
print("PSF shape:", psf.shape)
print("pixel size: {0:.3f}um".format(psf_data["pixel_size"]))
print("zmin, zmax: {0:.1f}nm, {1:.1f}nm".format(psf_data["zmin"], psf_data["zmax"]))
mid_xy = int(psf.shape[1]/2)
mid_z = int(psf.shape[0]/2)
xy_max = psf_data["pixel_size"] * psf.shape[1]
xy_min = 0.0
z_min = psf_data["zmin"] * 1.0e-3
z_max = psf_data["zmax"] * 1.0e-3
fig = pyplot.figure(figsize = (12,4))
ax1 = fig.add_subplot(1,3,1)
ax1.imshow(psf[mid_z,:,:],
interpolation = 'none',
extent = [xy_min, xy_max, xy_min, xy_max],
cmap = "gray")
ax1.set_title("PSF XY slice")
ax2 = fig.add_subplot(1,3,2)
ax2.imshow(psf[:,mid_xy,:],
interpolation = 'none',
extent = [xy_min, xy_max, z_min, z_max],
cmap = "gray")
ax2.set_title("PSF YZ slice")
ax3 = fig.add_subplot(1,3,3)
ax3.imshow(psf[:,:,mid_xy],
interpolation = 'none',
extent = [xy_min, xy_max, z_min, z_max],
cmap = "gray")
ax3.set_title("PSF XZ slice")
pyplot.show()
if verbose:
print("Plots are in microns")
if (__name__ == "__main__"):
import argparse
parser = argparse.ArgumentParser(description = 'Make images of a PSF.')
parser.add_argument('--psf', dest='psf', type=str, required=True,
help = "The name of the PSF format file.")
args = parser.parse_args()
psfImages(args.psf)
|
9a695e0cb3035e5118c203094a32fd439ec4ca74
|
6186a3787d1e74f1866844491da48b9643c8f1a9
|
/ghostwriter/api/urls.py
|
8e3706f71719b986629fff09209bb274c7a550e6
|
[
"BSD-3-Clause"
] |
permissive
|
GhostManager/Ghostwriter
|
b46b2421e5737ed0afbf49182dce9eeb5eb31936
|
b9eae4459ba192fbb2d4a5b66f8210d57fd7112a
|
refs/heads/master
| 2023-09-04T02:34:54.085997
| 2023-07-13T22:38:44
| 2023-07-13T22:38:44
| 197,269,443
| 1,011
| 197
|
BSD-3-Clause
| 2023-09-08T00:19:52
| 2019-07-16T21:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,083
|
py
|
urls.py
|
"""This contains all the URL mappings used by the API application."""
# Django Imports
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
# Ghostwriter Libraries
from ghostwriter.api.views import (
ApiKeyCreate,
ApiKeyRevoke,
GraphqlAttachFinding,
GraphqlAuthenticationWebhook,
GraphqlCheckoutDomain,
GraphqlCheckoutServer,
GraphqlDeleteEvidenceAction,
GraphqlDeleteReportTemplateAction,
GraphqlDomainCheckoutDelete,
GraphqlDomainUpdateEvent,
GraphqlEventTestView,
GraphqlGenerateReport,
GraphqlLoginAction,
GraphqlOplogEntryCreateEvent,
GraphqlOplogEntryDeleteEvent,
GraphqlOplogEntryUpdateEvent,
GraphqlServerCheckoutDelete,
GraphqlTestView,
GraphqlWhoami,
)
app_name = "api"
urlpatterns = [
# Actions
path("test", csrf_exempt(GraphqlTestView.as_view()), name="graphql_test"),
path("test_event", csrf_exempt(GraphqlEventTestView.as_view()), name="graphql_event_test"),
path("webhook", csrf_exempt(GraphqlAuthenticationWebhook.as_view()), name="graphql_webhook"),
path("login", csrf_exempt(GraphqlLoginAction.as_view()), name="graphql_login"),
path("whoami", csrf_exempt(GraphqlWhoami.as_view()), name="graphql_whoami"),
path("whoami", csrf_exempt(GraphqlWhoami.as_view()), name="graphql_whoami"),
path("generateReport", csrf_exempt(GraphqlGenerateReport.as_view()), name="graphql_generate_report"),
path("checkoutDomain", csrf_exempt(GraphqlCheckoutDomain.as_view()), name="graphql_checkout_domain"),
path("checkoutServer", csrf_exempt(GraphqlCheckoutServer.as_view()), name="graphql_checkout_server"),
path(
"deleteDomainCheckout",
csrf_exempt(GraphqlDomainCheckoutDelete.as_view()),
name="graphql_domain_checkout_delete",
),
path(
"deleteServerCheckout",
csrf_exempt(GraphqlServerCheckoutDelete.as_view()),
name="graphql_server_checkout_delete",
),
path("deleteEvidence", csrf_exempt(GraphqlDeleteEvidenceAction.as_view()), name="graphql_delete_evidence"),
path("deleteTemplate", csrf_exempt(GraphqlDeleteReportTemplateAction.as_view()), name="graphql_delete_template"),
path("attachFinding", csrf_exempt(GraphqlAttachFinding.as_view()), name="graphql_attach_finding"),
# Events
path("event/domain/update", csrf_exempt(GraphqlDomainUpdateEvent.as_view()), name="graphql_domain_update_event"),
path(
"event/oplogentry/create",
csrf_exempt(GraphqlOplogEntryCreateEvent.as_view()),
name="graphql_oplogentry_create_event",
),
path(
"event/oplogentry/update",
csrf_exempt(GraphqlOplogEntryUpdateEvent.as_view()),
name="graphql_oplogentry_update_event",
),
path(
"event/oplogentry/delete",
csrf_exempt(GraphqlOplogEntryDeleteEvent.as_view()),
name="graphql_oplogentry_delete_event",
),
path("ajax/token/revoke/<int:pk>", ApiKeyRevoke.as_view(), name="ajax_revoke_token"),
path("token/create", ApiKeyCreate.as_view(), name="ajax_create_token"),
]
|
416c3e8d1b4659bec94647fa643e4e8f7d0ad39b
|
64ab5b65afdf8d950c4b56ad2259133b95fc2fec
|
/zeus/api/schemas/email.py
|
0e626a5f4a0b05e42e1ce70628b6c1ec529b03dc
|
[
"Apache-2.0"
] |
permissive
|
getsentry/zeus
|
3e88895443b23278fdb4c25121422ee214630512
|
6d4a490c19ebe406b551641a022ca08f26c21fcb
|
refs/heads/master
| 2023-09-01T14:20:11.396306
| 2021-04-30T17:08:33
| 2021-04-30T17:08:33
| 96,131,433
| 222
| 27
|
Apache-2.0
| 2022-06-01T03:17:16
| 2017-07-03T16:39:35
|
Python
|
UTF-8
|
Python
| false
| false
| 172
|
py
|
email.py
|
from marshmallow import Schema, fields
class EmailSchema(Schema):
id = fields.UUID(dump_only=True)
email = fields.Str()
verified = fields.Bool(default=False)
|
a540bd4b76e4aeaafb6119e294c7350721949a18
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Cocoa/PyObjCTest/test_nsmetadata.py
|
42e3127e0207005c76a58d7cd4f6917ebc1d4eea
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,061
|
py
|
test_nsmetadata.py
|
import Foundation
from PyObjCTools.TestSupport import TestCase, min_os_level, min_sdk_level
import objc
class TestNSMetaData(TestCase):
def testConstants(self):
self.assertIsInstance(
Foundation.NSMetadataQueryDidStartGatheringNotification, str
)
self.assertIsInstance(
Foundation.NSMetadataQueryGatheringProgressNotification, str
)
self.assertIsInstance(
Foundation.NSMetadataQueryDidFinishGatheringNotification, str
)
self.assertIsInstance(Foundation.NSMetadataQueryDidUpdateNotification, str)
self.assertIsInstance(
Foundation.NSMetadataQueryResultContentRelevanceAttribute, str
)
self.assertIsInstance(Foundation.NSMetadataQueryUserHomeScope, str)
self.assertIsInstance(Foundation.NSMetadataQueryLocalComputerScope, str)
self.assertIsInstance(Foundation.NSMetadataQueryNetworkScope, str)
@min_os_level("10.7")
def testConstants10_7(self):
self.assertIsInstance(Foundation.NSMetadataQueryLocalDocumentsScope, str)
self.assertIsInstance(Foundation.NSMetadataQueryUbiquitousDocumentsScope, str)
self.assertIsInstance(Foundation.NSMetadataQueryUbiquitousDataScope, str)
self.assertIsInstance(Foundation.NSMetadataItemFSNameKey, str)
self.assertIsInstance(Foundation.NSMetadataItemDisplayNameKey, str)
self.assertIsInstance(Foundation.NSMetadataItemURLKey, str)
self.assertIsInstance(Foundation.NSMetadataItemPathKey, str)
self.assertIsInstance(Foundation.NSMetadataItemFSSizeKey, str)
self.assertIsInstance(Foundation.NSMetadataItemFSCreationDateKey, str)
self.assertIsInstance(Foundation.NSMetadataItemFSContentChangeDateKey, str)
self.assertIsInstance(Foundation.NSMetadataItemIsUbiquitousKey, str)
self.assertIsInstance(
Foundation.NSMetadataUbiquitousItemHasUnresolvedConflictsKey, str
)
self.assertIsInstance(Foundation.NSMetadataUbiquitousItemIsDownloadedKey, str)
self.assertIsInstance(Foundation.NSMetadataUbiquitousItemIsDownloadingKey, str)
self.assertIsInstance(Foundation.NSMetadataUbiquitousItemIsUploadedKey, str)
self.assertIsInstance(Foundation.NSMetadataUbiquitousItemIsUploadingKey, str)
self.assertIsInstance(
Foundation.NSMetadataUbiquitousItemPercentDownloadedKey, str
)
self.assertIsInstance(
Foundation.NSMetadataUbiquitousItemPercentUploadedKey, str
)
@min_os_level("10.9")
def testConstants10_9(self):
self.assertIsInstance(Foundation.NSMetadataQueryUpdateAddedItemsKey, str)
self.assertIsInstance(Foundation.NSMetadataQueryUpdateChangedItemsKey, str)
self.assertIsInstance(Foundation.NSMetadataQueryUpdateRemovedItemsKey, str)
self.assertIsInstance(Foundation.NSMetadataQueryIndexedLocalComputerScope, str)
self.assertIsInstance(Foundation.NSMetadataQueryIndexedNetworkScope, str)
@min_os_level("10.10")
def testConstants10_10(self):
self.assertIsInstance(
Foundation.NSMetadataQueryAccessibleUbiquitousExternalDocumentsScope, str
)
def testMethods(self):
self.assertResultIsBOOL(Foundation.NSMetadataQuery.startQuery)
self.assertResultIsBOOL(Foundation.NSMetadataQuery.isStarted)
self.assertResultIsBOOL(Foundation.NSMetadataQuery.isGathering)
self.assertResultIsBOOL(Foundation.NSMetadataQuery.isStopped)
@min_os_level("10.9")
def testMethods10_9(self):
self.assertArgIsBlock(
Foundation.NSMetadataQuery.enumerateResultsUsingBlock_,
0,
b"v@" + objc._C_NSUInteger + b"o^Z",
)
self.assertArgIsBlock(
Foundation.NSMetadataQuery.enumerateResultsWithOptions_usingBlock_,
1,
b"v@" + objc._C_NSUInteger + b"o^Z",
)
@min_sdk_level("10.10")
def testProtocolObjects(self):
self.assertProtocolExists("NSMetadataQueryDelegate")
|
cf8200c126e3751d88331d3872e674c9cb831fe3
|
6cbc96c74dea3974dcfa296eeb692619d571873b
|
/scihub_eva/utils/ui_utils.py
|
3035c41b637e0890df53cd4a97d03c7a87bbc770
|
[
"LGPL-2.1-or-later",
"MIT",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
leovan/SciHubEVA
|
9c4fcb49d3c73b55905c0d7bb40de20130c55e35
|
33b89c0516f974a7c24120a570383291747342d3
|
refs/heads/main
| 2023-08-17T21:14:59.213662
| 2023-05-25T04:59:02
| 2023-05-25T04:59:02
| 134,055,753
| 980
| 171
|
MIT
| 2023-05-24T08:49:22
| 2018-05-19T11:26:48
|
Python
|
UTF-8
|
Python
| false
| false
| 439
|
py
|
ui_utils.py
|
# -*- coding: utf-8 -*-
def center_window(window, parent_window):
parent_window_center_x = parent_window.x() + int(parent_window.width() / 2)
parent_window_center_y = parent_window.y() + int(parent_window.height() / 2)
window_x = parent_window_center_x - int(window.width() / 2)
window_y = parent_window_center_y - int(window.height() / 2)
window.setPosition(window_x, window_y)
__all__ = [
'center_window'
]
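# A hedged usage sketch (not part of the original module): any Qt/QML-style
# window objects exposing x()/y()/width()/height() on the parent and
# width()/height()/setPosition() on the child should work, e.g.
#
#   center_window(about_dialog, main_window)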
|
90c84666a9a99b2e6821b6ffa9c0306c52dbcad8
|
8a40a3db07eec18178c9b8757aafdb35724ff324
|
/src/engineio/async_drivers/gevent_uwsgi.py
|
7587ac155eb45f6539cd33a9976f0c4e55dba699
|
[
"MIT"
] |
permissive
|
miguelgrinberg/python-engineio
|
52313e7fd2cd740e5a083976231c056d53c9a590
|
35cc5ec0a69b5274697928af4a163e0ca42e1afb
|
refs/heads/main
| 2023-08-18T05:55:37.901376
| 2023-08-15T18:02:04
| 2023-08-15T18:02:04
| 37,830,040
| 236
| 175
|
MIT
| 2023-09-03T15:13:49
| 2015-06-21T23:17:21
|
Python
|
UTF-8
|
Python
| false
| false
| 5,916
|
py
|
gevent_uwsgi.py
|
import gevent
from gevent import queue
from gevent.event import Event
from gevent import selectors
import uwsgi
_websocket_available = hasattr(uwsgi, 'websocket_handshake')
class Thread(gevent.Greenlet): # pragma: no cover
"""
    This wrapper class provides a gevent Greenlet interface that is compatible
with the standard library's Thread class.
"""
def __init__(self, target, args=[], kwargs={}):
super().__init__(target, *args, **kwargs)
def _run(self):
return self.run()
class uWSGIWebSocket(object): # pragma: no cover
"""
This wrapper class provides a uWSGI WebSocket interface that is
compatible with eventlet's implementation.
"""
def __init__(self, handler, server):
self.app = handler
self._sock = None
self.received_messages = []
def __call__(self, environ, start_response):
self._sock = uwsgi.connection_fd()
self.environ = environ
uwsgi.websocket_handshake()
self._req_ctx = None
if hasattr(uwsgi, 'request_context'):
            # uWSGI >= 2.1.x with support for API access across greenlets
self._req_ctx = uwsgi.request_context()
else:
# use event and queue for sending messages
self._event = Event()
self._send_queue = queue.Queue()
# spawn a select greenlet
def select_greenlet_runner(fd, event):
"""Sets event when data becomes available to read on fd."""
sel = selectors.DefaultSelector()
sel.register(fd, selectors.EVENT_READ)
try:
while True:
sel.select()
event.set()
except gevent.GreenletExit:
sel.unregister(fd)
self._select_greenlet = gevent.spawn(
select_greenlet_runner,
self._sock,
self._event)
self.app(self)
def close(self):
"""Disconnects uWSGI from the client."""
if self._req_ctx is None:
# better kill it here in case wait() is not called again
self._select_greenlet.kill()
self._event.set()
uwsgi.disconnect()
def _send(self, msg):
"""Transmits message either in binary or UTF-8 text mode,
depending on its type."""
if isinstance(msg, bytes):
method = uwsgi.websocket_send_binary
else:
method = uwsgi.websocket_send
if self._req_ctx is not None:
method(msg, request_context=self._req_ctx)
else:
method(msg)
def _decode_received(self, msg):
"""Returns either bytes or str, depending on message type."""
if not isinstance(msg, bytes):
# already decoded - do nothing
return msg
# only decode from utf-8 if message is not binary data
type = ord(msg[0:1])
if type >= 48: # no binary
return msg.decode('utf-8')
# binary message, don't try to decode
return msg
def send(self, msg):
"""Queues a message for sending. Real transmission is done in
wait method.
Sends directly if uWSGI version is new enough."""
if self._req_ctx is not None:
self._send(msg)
else:
self._send_queue.put(msg)
self._event.set()
def wait(self):
"""Waits and returns received messages.
If running in compatibility mode for older uWSGI versions,
it also sends messages that have been queued by send().
A return value of None means that connection was closed.
This must be called repeatedly. For uWSGI < 2.1.x it must
be called from the main greenlet."""
while True:
if self._req_ctx is not None:
try:
msg = uwsgi.websocket_recv(request_context=self._req_ctx)
except IOError: # connection closed
self.close()
return None
return self._decode_received(msg)
else:
if self.received_messages:
return self.received_messages.pop(0)
# we wake up at least every 3 seconds to let uWSGI
# do its ping/ponging
event_set = self._event.wait(timeout=3)
if event_set:
self._event.clear()
# maybe there is something to send
msgs = []
while True:
try:
msgs.append(self._send_queue.get(block=False))
except gevent.queue.Empty:
break
for msg in msgs:
try:
self._send(msg)
except IOError:
self.close()
return None
# maybe there is something to receive, if not, at least
# ensure uWSGI does its ping/ponging
while True:
try:
msg = uwsgi.websocket_recv_nb()
except IOError: # connection closed
self.close()
return None
if msg: # message available
self.received_messages.append(
self._decode_received(msg))
else:
break
if self.received_messages:
return self.received_messages.pop(0)
_async = {
'thread': Thread,
'queue': queue.JoinableQueue,
'queue_empty': queue.Empty,
'event': Event,
'websocket': uWSGIWebSocket if _websocket_available else None,
'sleep': gevent.sleep,
}
|
6b0c03686bde17fe86e5817077a736a78c4b15a1
|
4674b8088ffdf55905d44995f08a0792a3e4cd5c
|
/tests/hwsim/test_p2p_wifi_display.py
|
b032c4f42581b91e720ae2e1cb8ac00fe7264698
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
vanhoefm/krackattacks-scripts
|
41daca791638a92aa4cfa68a582e46119037560e
|
4b78669686f74efe664c6543b1b5b1616b22f902
|
refs/heads/research
| 2022-10-29T20:21:11.512335
| 2022-10-16T18:44:41
| 2022-10-16T18:44:41
| 107,408,514
| 2,184
| 577
|
NOASSERTION
| 2021-07-06T12:43:49
| 2017-10-18T12:58:08
|
C
|
UTF-8
|
Python
| false
| false
| 20,441
|
py
|
test_p2p_wifi_display.py
|
# Wi-Fi Display test cases
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import logging
logger = logging.getLogger()
import time
import threading
import Queue
import hwsim_utils
import utils
from p2p_utils import *
def test_wifi_display(dev):
"""Wi-Fi Display extensions to P2P"""
wfd_devinfo = "00411c440028"
dev[0].request("SET wifi_display 1")
dev[0].request("WFD_SUBELEM_SET 0 0006" + wfd_devinfo)
if wfd_devinfo not in dev[0].request("WFD_SUBELEM_GET 0"):
raise Exception("Could not fetch back configured subelement")
# Associated BSSID
dev[0].request("WFD_SUBELEM_SET 1 0006020304050607")
# Coupled Sink
dev[0].request("WFD_SUBELEM_SET 6 000700000000000000")
# Session Info
dev[0].request("WFD_SUBELEM_SET 9 0000")
# WFD Extended Capability
dev[0].request("WFD_SUBELEM_SET 7 00020000")
# WFD Content Protection
prot = "0001" + "00"
dev[0].request("WFD_SUBELEM_SET 5 " + prot)
# WFD Video Formats
video = "0015" + "010203040506070809101112131415161718192021"
dev[0].request("WFD_SUBELEM_SET 3 " + video)
# WFD 3D Video Formats
video_3d = "0011" + "0102030405060708091011121314151617"
dev[0].request("WFD_SUBELEM_SET 4 " + video_3d)
# WFD Audio Formats
audio = "000f" + "010203040506070809101112131415"
dev[0].request("WFD_SUBELEM_SET 2 " + audio)
elems = dev[0].request("WFD_SUBELEM_GET all")
if wfd_devinfo not in elems:
raise Exception("Could not fetch back configured subelements")
wfd_devinfo2 = "00001c440028"
dev[1].request("SET wifi_display 1")
dev[1].request("WFD_SUBELEM_SET 0 0006" + wfd_devinfo2)
if wfd_devinfo2 not in dev[1].request("WFD_SUBELEM_GET 0"):
raise Exception("Could not fetch back configured subelement")
dev[0].p2p_listen()
if "FAIL" in dev[1].global_request("P2P_SERV_DISC_REQ " + dev[0].p2p_dev_addr() + " wifi-display [source][pri-sink] 2,3,4,5"):
raise Exception("Setting SD request failed")
dev[1].p2p_find(social=True)
ev = dev[0].wait_global_event(["P2P-SERV-DISC-REQ"], timeout=10)
if ev is None:
raise Exception("Device discovery request not reported")
ev = dev[1].wait_global_event(["P2P-DEVICE-FOUND"], timeout=5)
if ev is None:
raise Exception("Device discovery timed out")
if "wfd_dev_info=0x" + wfd_devinfo not in ev:
raise Exception("Wi-Fi Display Info not in P2P-DEVICE-FOUND event")
if "new=1" not in ev:
raise Exception("new=1 flag missing from P2P-DEVICE-FOUND event")
ev = dev[1].wait_global_event(["P2P-SERV-DISC-RESP"], timeout=5)
if ev is None:
raise Exception("Service discovery timed out")
if prot not in ev:
raise Exception("WFD Content Protection missing from WSD response")
if video not in ev:
raise Exception("WFD Video Formats missing from WSD response")
if video_3d not in ev:
raise Exception("WFD 3D Video Formats missing from WSD response")
if audio not in ev:
raise Exception("WFD Audio Formats missing from WSD response")
dev[1].dump_monitor()
dev[0].request("WFD_SUBELEM_SET 0 0006" + wfd_devinfo2)
ev = dev[1].wait_global_event(["P2P-DEVICE-FOUND"], timeout=15)
if ev is None:
raise Exception("Peer info update timed out")
if "new=0" not in ev:
raise Exception("new=0 flag missing from P2P-DEVICE-FOUND event")
if "wfd_dev_info=0x" + wfd_devinfo2 not in ev:
raise Exception("Wi-Fi Display Info not in P2P-DEVICE-FOUND event")
dev[1].dump_monitor()
dev[0].request("WFD_SUBELEM_SET 0 0006" + wfd_devinfo)
ev = dev[1].wait_global_event(["P2P-DEVICE-FOUND"], timeout=15)
if ev is None:
raise Exception("Peer info update timed out")
if "new=0" not in ev:
raise Exception("new=0 flag missing from P2P-DEVICE-FOUND event")
if "wfd_dev_info=0x" + wfd_devinfo not in ev:
raise Exception("Wi-Fi Display Info not in P2P-DEVICE-FOUND event")
pin = dev[0].wps_read_pin()
dev[0].p2p_go_neg_auth(dev[1].p2p_dev_addr(), pin, 'display')
res1 = dev[1].p2p_go_neg_init(dev[0].p2p_dev_addr(), pin, 'enter',
timeout=20, go_intent=15, freq=2437)
res2 = dev[0].p2p_go_neg_auth_result()
bss = dev[0].get_bss("p2p_dev_addr=" + dev[1].p2p_dev_addr())
if bss['bssid'] != dev[1].p2p_interface_addr():
raise Exception("Unexpected BSSID in the BSS entry for the GO")
if wfd_devinfo2 not in bss['wfd_subelems']:
raise Exception("Could not see wfd_subelems in GO's BSS entry")
peer = dev[0].get_peer(dev[1].p2p_dev_addr())
if wfd_devinfo2 not in peer['wfd_subelems']:
raise Exception("Could not see wfd_subelems in GO's peer entry")
peer = dev[1].get_peer(dev[0].p2p_dev_addr())
if wfd_devinfo not in peer['wfd_subelems']:
raise Exception("Could not see wfd_subelems in client's peer entry")
wfd_devinfo3 = "00001c440028"
dev[2].request("SET wifi_display 1")
dev[2].request("WFD_SUBELEM_SET 0 0006" + wfd_devinfo3)
dev[2].p2p_find(social=True)
ev = dev[2].wait_global_event(["P2P-DEVICE-FOUND"], timeout=5)
if ev is None:
raise Exception("Device discovery timed out")
if dev[1].p2p_dev_addr() not in ev:
ev = dev[2].wait_global_event(["P2P-DEVICE-FOUND"], timeout=5)
if ev is None:
raise Exception("Device discovery timed out")
if dev[1].p2p_dev_addr() not in ev:
raise Exception("Could not discover GO")
if "wfd_dev_info=0x" + wfd_devinfo2 not in ev:
raise Exception("Wi-Fi Display Info not in P2P-DEVICE-FOUND event")
bss = dev[2].get_bss("p2p_dev_addr=" + dev[1].p2p_dev_addr())
if bss['bssid'] != dev[1].p2p_interface_addr():
raise Exception("Unexpected BSSID in the BSS entry for the GO")
if wfd_devinfo2 not in bss['wfd_subelems']:
raise Exception("Could not see wfd_subelems in GO's BSS entry")
peer = dev[2].get_peer(dev[1].p2p_dev_addr())
if wfd_devinfo2 not in peer['wfd_subelems']:
raise Exception("Could not see wfd_subelems in GO's peer entry")
dev[2].p2p_stop_find()
if dev[0].request("WFD_SUBELEM_GET 2") != audio:
raise Exception("Unexpected WFD_SUBELEM_GET 2 value")
if dev[0].request("WFD_SUBELEM_GET 3") != video:
raise Exception("Unexpected WFD_SUBELEM_GET 3 value")
if dev[0].request("WFD_SUBELEM_GET 4") != video_3d:
raise Exception("Unexpected WFD_SUBELEM_GET 42 value")
if dev[0].request("WFD_SUBELEM_GET 5") != prot:
raise Exception("Unexpected WFD_SUBELEM_GET 5 value")
if "FAIL" not in dev[0].request("WFD_SUBELEM_SET "):
raise Exception("Unexpected WFD_SUBELEM_SET success")
if "FAIL" not in dev[0].request("WFD_SUBELEM_SET 6"):
raise Exception("Unexpected WFD_SUBELEM_SET success")
if "OK" not in dev[0].request("WFD_SUBELEM_SET 6 "):
raise Exception("Unexpected WFD_SUBELEM_SET failure")
if "FAIL" not in dev[0].request("WFD_SUBELEM_SET 6 0"):
raise Exception("Unexpected WFD_SUBELEM_SET success")
if "FAIL" not in dev[0].request("WFD_SUBELEM_SET 6 0q"):
raise Exception("Unexpected WFD_SUBELEM_SET success")
if dev[0].request("WFD_SUBELEM_GET 6") != "":
raise Exception("Unexpected WFD_SUBELEM_GET 6 response")
if dev[0].request("WFD_SUBELEM_GET 8") != "":
raise Exception("Unexpected WFD_SUBELEM_GET 8 response")
if dev[0].global_request("WFD_SUBELEM_GET 2") != audio:
raise Exception("Unexpected WFD_SUBELEM_GET 2 value from global interface")
if "OK" not in dev[0].global_request("WFD_SUBELEM_SET 1 0006020304050608"):
raise Exception("WFD_SUBELEM_SET failed on global interface")
if dev[0].request("WFD_SUBELEM_GET 1") != "0006020304050608":
raise Exception("Unexpected WFD_SUBELEM_GET 1 value (per-interface)")
elems = dev[0].request("WFD_SUBELEM_GET all")
if "OK" not in dev[0].request("WFD_SUBELEM_SET all " + elems):
raise Exception("WFD_SUBELEM_SET all failed")
if dev[0].request("WFD_SUBELEM_GET all") != elems:
raise Exception("Mismatch in WFS_SUBELEM_SET/GET all")
test = "00000600411c440028"
if "OK" not in dev[0].request("WFD_SUBELEM_SET all " + test):
raise Exception("WFD_SUBELEM_SET all failed")
if dev[0].request("WFD_SUBELEM_GET all") != test:
raise Exception("Mismatch in WFS_SUBELEM_SET/GET all")
if "FAIL" not in dev[0].request("WFD_SUBELEM_SET all qwerty"):
raise Exception("Invalid WFD_SUBELEM_SET all succeeded")
if "FAIL" not in dev[0].request("WFD_SUBELEM_SET all 11"):
raise Exception("Invalid WFD_SUBELEM_SET all succeeded")
dev[0].request("WFD_SUBELEM_SET all 112233445566")
dev[0].request("WFD_SUBELEM_SET all ff0000fe0000fd00")
if "FAIL" not in dev[0].request("WFD_SUBELEM_SET 300 112233"):
raise Exception("Invalid WFD_SUBELEM_SET 300 succeeded")
if "FAIL" not in dev[0].request("WFD_SUBELEM_SET -1 112233"):
raise Exception("Invalid WFD_SUBELEM_SET -1 succeeded")
if "FAIL" not in dev[0].request("WFD_SUBELEM_GET 300"):
raise Exception("Invalid WFD_SUBELEM_GET 300 succeeded")
if "FAIL" not in dev[0].request("WFD_SUBELEM_GET -1"):
raise Exception("Invalid WFD_SUBELEM_GET -1 succeeded")
dev[0].request("SET wifi_display 0")
dev[1].request("SET wifi_display 0")
dev[2].request("SET wifi_display 0")
def test_wifi_display_r2(dev):
"""Wi-Fi Display extensions to P2P with R2 subelems"""
wfd_devinfo = "00411c440028"
dev[0].request("SET wifi_display 1")
dev[0].request("WFD_SUBELEM_SET 0 0006" + wfd_devinfo)
# Associated BSSID
dev[0].request("WFD_SUBELEM_SET 1 0006020304050607")
# Coupled Sink
dev[0].request("WFD_SUBELEM_SET 6 000700000000000000")
# Session Info
dev[0].request("WFD_SUBELEM_SET 9 0000")
# WFD Extended Capability
dev[0].request("WFD_SUBELEM_SET 7 00020000")
# WFD Content Protection
prot = "0001" + "00"
dev[0].request("WFD_SUBELEM_SET 5 " + prot)
# WFD Video Formats
video = "0015" + "010203040506070809101112131415161718192021"
dev[0].request("WFD_SUBELEM_SET 3 " + video)
# WFD 3D Video Formats
video_3d = "0011" + "0102030405060708091011121314151617"
dev[0].request("WFD_SUBELEM_SET 4 " + video_3d)
# WFD Audio Formats
audio = "000f" + "010203040506070809101112131415"
dev[0].request("WFD_SUBELEM_SET 2 " + audio)
# MAC Info
mac_info = "0006" + "112233445566"
dev[0].request("WFD_SUBELEM_SET 10 " + mac_info)
# R2 Device Info
r2_dev_info = "0006" + "aabbccddeeff"
dev[0].request("WFD_SUBELEM_SET 11 " + r2_dev_info)
elems = dev[0].request("WFD_SUBELEM_GET all")
if wfd_devinfo not in elems:
raise Exception("Could not fetch back configured subelements")
wfd_devinfo2 = "00001c440028"
dev[1].request("SET wifi_display 1")
dev[1].request("WFD_SUBELEM_SET 0 0006" + wfd_devinfo2)
if wfd_devinfo2 not in dev[1].request("WFD_SUBELEM_GET 0"):
raise Exception("Could not fetch back configured subelement")
dev[0].p2p_listen()
dev[1].p2p_find(social=True)
ev = dev[1].wait_global_event(["P2P-DEVICE-FOUND"], timeout=5)
if ev is None:
raise Exception("Device discovery timed out")
if "wfd_dev_info=0x" + wfd_devinfo not in ev:
raise Exception("Wi-Fi Display Info not in P2P-DEVICE-FOUND event")
if "new=1" not in ev:
raise Exception("new=1 flag missing from P2P-DEVICE-FOUND event")
pin = dev[0].wps_read_pin()
dev[0].p2p_go_neg_auth(dev[1].p2p_dev_addr(), pin, 'display')
res1 = dev[1].p2p_go_neg_init(dev[0].p2p_dev_addr(), pin, 'enter',
timeout=20, go_intent=15, freq=2437)
res2 = dev[0].p2p_go_neg_auth_result()
bss = dev[0].get_bss("p2p_dev_addr=" + dev[1].p2p_dev_addr())
if bss['bssid'] != dev[1].p2p_interface_addr():
raise Exception("Unexpected BSSID in the BSS entry for the GO")
if wfd_devinfo2 not in bss['wfd_subelems']:
raise Exception("Could not see wfd_subelems in GO's BSS entry")
peer = dev[0].get_peer(dev[1].p2p_dev_addr())
if wfd_devinfo2 not in peer['wfd_subelems']:
raise Exception("Could not see wfd_subelems in GO's peer entry")
peer = dev[1].get_peer(dev[0].p2p_dev_addr())
if wfd_devinfo not in peer['wfd_subelems']:
raise Exception("Could not see wfd_subelems in client's peer entry")
if r2_dev_info not in peer['wfd_subelems']:
raise Exception("Could not see r2_dev_info in client's peer entry")
elems = dev[0].request("WFD_SUBELEM_GET all")
if "OK" not in dev[0].request("WFD_SUBELEM_SET all " + elems):
raise Exception("WFD_SUBELEM_SET all failed")
if dev[0].request("WFD_SUBELEM_GET all") != elems:
raise Exception("Mismatch in WFS_SUBELEM_SET/GET all")
test = "00000600411c440028"
if "OK" not in dev[0].request("WFD_SUBELEM_SET all " + test):
raise Exception("WFD_SUBELEM_SET all failed")
if dev[0].request("WFD_SUBELEM_GET all") != test:
raise Exception("Mismatch in WFS_SUBELEM_SET/GET all")
dev[0].request("SET wifi_display 0")
dev[1].request("SET wifi_display 0")
dev[2].request("SET wifi_display 0")
def enable_wifi_display(dev):
dev.request("SET wifi_display 1")
dev.request("WFD_SUBELEM_SET 0 000600411c440028")
def test_wifi_display_go_invite(dev):
"""P2P GO with Wi-Fi Display inviting a client to join"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
try:
enable_wifi_display(dev[0])
enable_wifi_display(dev[1])
enable_wifi_display(dev[2])
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1, social=True):
raise Exception("Peer " + addr1 + " not found")
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0, social=True):
raise Exception("Peer " + addr0 + " not found")
dev[1].p2p_listen()
logger.info("Authorize invitation")
pin = dev[1].wps_read_pin()
dev[1].global_request("P2P_CONNECT " + addr0 + " " + pin + " join auth")
dev[0].p2p_start_go(freq=2412)
# Add test client to the group
connect_cli(dev[0], dev[2], social=True, freq=2412)
logger.info("Invite peer to join the group")
dev[0].p2p_go_authorize_client(pin)
dev[0].global_request("P2P_INVITE group=" + dev[0].group_ifname + " peer=" + addr1)
ev = dev[1].wait_global_event(["P2P-INVITATION-RECEIVED",
"P2P-GROUP-STARTED"], timeout=20)
if ev is None:
raise Exception("Timeout on invitation on peer")
if "P2P-INVITATION-RECEIVED" in ev:
raise Exception("Unexpected request to accept pre-authorized invitation")
dev[0].remove_group()
dev[1].wait_go_ending_session()
dev[2].wait_go_ending_session()
finally:
dev[0].request("SET wifi_display 0")
dev[1].request("SET wifi_display 0")
dev[2].request("SET wifi_display 0")
def test_wifi_display_persistent_group(dev):
"""P2P persistent group formation and re-invocation with Wi-Fi Display enabled"""
try:
enable_wifi_display(dev[0])
enable_wifi_display(dev[1])
enable_wifi_display(dev[2])
form(dev[0], dev[1])
peer = dev[1].get_peer(dev[0].p2p_dev_addr())
listen_freq = peer['listen_freq']
invite_from_cli(dev[0], dev[1])
invite_from_go(dev[0], dev[1])
dev[0].dump_monitor()
dev[1].dump_monitor()
networks = dev[0].list_networks(p2p=True)
if len(networks) != 1:
raise Exception("Unexpected number of networks")
if "[P2P-PERSISTENT]" not in networks[0]['flags']:
raise Exception("Not the persistent group data")
if "OK" not in dev[0].global_request("P2P_GROUP_ADD persistent=" + networks[0]['id'] + " freq=" + listen_freq):
raise Exception("Could not start GO")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=2)
if ev is None:
raise Exception("GO start up timed out")
dev[0].group_form_result(ev)
connect_cli(dev[0], dev[2], social=True, freq=listen_freq)
dev[0].dump_monitor()
dev[1].dump_monitor()
invite(dev[1], dev[0])
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=30)
if ev is None:
raise Exception("Timeout on group re-invocation (on client)")
dev[1].group_form_result(ev)
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=0.1)
if ev is not None:
raise Exception("Unexpected P2P-GROUP-START on GO")
hwsim_utils.test_connectivity_p2p(dev[0], dev[1])
finally:
dev[0].request("SET wifi_display 0")
dev[1].request("SET wifi_display 0")
dev[2].request("SET wifi_display 0")
@remote_compatible
def test_wifi_display_invalid_subelem(dev):
"""Wi-Fi Display and invalid subelement parsing"""
addr1 = dev[1].p2p_dev_addr()
try:
enable_wifi_display(dev[0])
enable_wifi_display(dev[1])
dev[1].request("WFD_SUBELEM_SET 0 ffff00411c440028")
dev[1].p2p_listen()
dev[0].p2p_find(social=True)
ev = dev[0].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev is None:
raise Exception("Device discovery timed out")
if "wfd_dev_info=" in ev:
raise Exception("Invalid WFD subelement was shown")
finally:
dev[0].request("SET wifi_display 0")
dev[1].request("SET wifi_display 0")
def test_wifi_display_parsing(dev):
"""Wi-Fi Display extensions to P2P and special parsing cases"""
try:
_test_wifi_display_parsing(dev)
finally:
dev[1].request("VENDOR_ELEM_REMOVE 11 *")
dev[0].request("SET wifi_display 0")
def _test_wifi_display_parsing(dev):
wfd_devinfo = "00411c440028"
dev[0].request("SET wifi_display 1")
dev[0].request("WFD_SUBELEM_SET 0 0006" + wfd_devinfo)
dev[0].p2p_start_go(freq=2412)
# P2P Client with invalid WFD IE
if "OK" not in dev[1].request("VENDOR_ELEM_ADD 11 dd10506f9a0a000000010000060000ffffff"):
raise Exception("VENDOR_ELEM_ADD failed")
pin = dev[1].wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
dev[1].p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60,
social=True, freq=2412)
bssid = dev[0].get_group_status_field('bssid')
dev[2].scan_for_bss(bssid, freq=2412, force_scan=True)
bss = dev[2].get_bss(bssid)
if bss['wfd_subelems'] != "000006" + wfd_devinfo:
raise Exception("Unexpected WFD elements in scan results: " + bss['wfd_subelems'])
# P2P Client without WFD IE
pin = dev[2].wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
dev[2].p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60,
social=True, freq=2412)
dev[2].remove_group()
dev[0].remove_group()
dev[1].wait_go_ending_session()
def test_wifi_display_disable(dev):
"""Peer disabling Wi-Fi Display advertisement"""
try:
enable_wifi_display(dev[1])
dev[1].p2p_listen()
dev[0].p2p_find(social=True)
ev = dev[0].wait_global_event(["P2P-DEVICE-FOUND"], timeout=15)
if ev is None:
raise Exception("Peer not found")
if "wfd_dev_info" not in ev:
raise Exception("Missing wfd_dev_info")
dev[1].request("SET wifi_display 0")
ev = dev[0].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev is None:
raise Exception("Peer update not indicated")
if "new=0" not in ev:
raise Exception("Incorrect update event: " + ev)
if "wfd_dev_info" in ev:
raise Exception("Unexpected wfd_dev_info")
ev = dev[0].wait_global_event(["P2P-DEVICE-FOUND"], timeout=0.75)
if ev is not None:
raise Exception("Unexpected peer found event: " + ev)
dev[0].p2p_stop_find()
dev[1].p2p_stop_find()
finally:
dev[1].request("SET wifi_display 0")
|
53be0c9abe39cebb7aec6e8005117dabc4ef6d87
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/lib/sqlalchemy/dialects/mysql/expression.py
|
c5bd0be02b0d9d7a44d42e82dba3a43af24ff137
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,066
|
py
|
expression.py
|
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from ... import exc
from ... import util
from ...sql import coercions
from ...sql import elements
from ...sql import operators
from ...sql import roles
from ...sql.base import _generative
from ...sql.base import Generative
from ...util.typing import Self
class match(Generative, elements.BinaryExpression):
"""Produce a ``MATCH (X, Y) AGAINST ('TEXT')`` clause.
E.g.::
from sqlalchemy import desc
from sqlalchemy.dialects.mysql import match
match_expr = match(
users_table.c.firstname,
users_table.c.lastname,
against="Firstname Lastname",
)
stmt = (
select(users_table)
.where(match_expr.in_boolean_mode())
.order_by(desc(match_expr))
)
Would produce SQL resembling::
SELECT id, firstname, lastname
FROM user
WHERE MATCH(firstname, lastname) AGAINST (:param_1 IN BOOLEAN MODE)
ORDER BY MATCH(firstname, lastname) AGAINST (:param_2) DESC
The :func:`_mysql.match` function is a standalone version of the
:meth:`_sql.ColumnElement.match` method available on all
SQL expressions, as when :meth:`_expression.ColumnElement.match` is
    used, but allows passing multiple columns.
:param cols: column expressions to match against
:param against: expression to be compared towards
:param in_boolean_mode: boolean, set "boolean mode" to true
    :param in_natural_language_mode: boolean, set "natural language" to true
:param with_query_expansion: boolean, set "query expansion" to true
.. versionadded:: 1.4.19
.. seealso::
:meth:`_expression.ColumnElement.match`
"""
__visit_name__ = "mysql_match"
inherit_cache = True
def __init__(self, *cols, **kw):
if not cols:
raise exc.ArgumentError("columns are required")
against = kw.pop("against", None)
if against is None:
raise exc.ArgumentError("against is required")
against = coercions.expect(
roles.ExpressionElementRole,
against,
)
left = elements.BooleanClauseList._construct_raw(
operators.comma_op,
clauses=cols,
)
left.group = False
flags = util.immutabledict(
{
"mysql_boolean_mode": kw.pop("in_boolean_mode", False),
"mysql_natural_language": kw.pop(
"in_natural_language_mode", False
),
"mysql_query_expansion": kw.pop("with_query_expansion", False),
}
)
if kw:
raise exc.ArgumentError("unknown arguments: %s" % (", ".join(kw)))
super().__init__(left, against, operators.match_op, modifiers=flags)
@_generative
def in_boolean_mode(self) -> Self:
"""Apply the "IN BOOLEAN MODE" modifier to the MATCH expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_boolean_mode": True})
return self
@_generative
def in_natural_language_mode(self) -> Self:
"""Apply the "IN NATURAL LANGUAGE MODE" modifier to the MATCH
expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_natural_language": True})
return self
@_generative
def with_query_expansion(self) -> Self:
"""Apply the "WITH QUERY EXPANSION" modifier to the MATCH expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_query_expansion": True})
return self
|
a1ac8ec13fb5f6f520a4e2520f86bd7335188886
|
9ce06078a5cd85910fbba882404e877359873989
|
/pysmt/test/smtlib/test_smtlibscript.py
|
2ae7c9debc2bc614ac30c74fb0eea81fde502a3f
|
[
"Apache-2.0"
] |
permissive
|
pysmt/pysmt
|
af164bd5cb6f622e6cdf174bff5aba8bed35e583
|
8c79de2635936f980595f4a43ee20a7da7554844
|
refs/heads/master
| 2023-08-23T09:46:10.573341
| 2023-05-03T09:35:39
| 2023-05-03T09:35:39
| 27,359,780
| 536
| 150
|
Apache-2.0
| 2023-07-27T11:46:09
| 2014-12-01T02:52:26
|
Python
|
UTF-8
|
Python
| false
| false
| 11,013
|
py
|
test_smtlibscript.py
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from io import StringIO
import pysmt.smtlib.commands as smtcmd
from pysmt.shortcuts import And, Or, Symbol, GT, Real, Not
from pysmt.typing import REAL
from pysmt.test import TestCase, main
from pysmt.smtlib.script import SmtLibScript, SmtLibCommand
from pysmt.smtlib.script import smtlibscript_from_formula, evaluate_command
from pysmt.smtlib.parser import get_formula_strict, get_formula, SmtLibParser
from pysmt.solvers.smtlib import SmtLibIgnoreMixin
from pysmt.logics import QF_UFLIRA
from pysmt.exceptions import UndefinedLogicError, PysmtValueError
class TestSmtLibScript(TestCase):
def test_basic_operations(self):
script = SmtLibScript()
script.add(name=smtcmd.SET_LOGIC,
args=[None])
self.assertIsNotNone(SmtLibScript())
self.assertTrue(len(script) > 0)
res = script.contains_command(smtcmd.SET_LOGIC)
self.assertTrue(res)
res = script.contains_command(smtcmd.CHECK_SAT)
self.assertFalse(res)
res = script.count_command_occurrences(smtcmd.CHECK_SAT)
self.assertEqual(res, 0, "Was expecting 0 occurrences of check-sat")
res = script.count_command_occurrences(smtcmd.SET_LOGIC)
self.assertEqual(res, 1, "Was expecting 1 occurrences of set-logic")
res = script.filter_by_command_name([smtcmd.SET_LOGIC])
self.assertEqual(len(list(res)), 1)
def test_declare_sort(self):
class SmtLibIgnore(SmtLibIgnoreMixin):
declare_sort_history = []
def declare_sort(self, name, arity):
self.declare_sort_history.append((name, arity))
mock = SmtLibIgnore()
parser = SmtLibParser()
smtlib_script = '\n'.join(['(declare-sort s0 0)', \
'(declare-sort s1 1)', \
'(declare-const c0 s0)', \
'(declare-const c1 (s1 Int))'])
outstream = StringIO(smtlib_script)
script = parser.get_script(outstream)
script.evaluate(solver=mock)
self.assertEqual(len(mock.declare_sort_history), 2)
s0_name, s0_arity = mock.declare_sort_history[0]
s1_name, s1_arity = mock.declare_sort_history[1]
self.assertEqual(s0_name, "s0")
self.assertEqual(s0_arity, 0)
self.assertEqual(s1_name, "s1")
self.assertEqual(s1_arity, 1)
def test_from_formula(self):
x, y = Symbol("x"), Symbol("y")
f = And(x, Or(y, x))
script = smtlibscript_from_formula(f)
self.assertIsNotNone(script)
outstream = StringIO()
script.serialize(outstream)
output = outstream.getvalue()
self.assertIn("(set-logic ", output)
self.assertIn("(declare-fun x () Bool)", output)
self.assertIn("(declare-fun y () Bool)", output)
self.assertIn("(check-sat)", output)
# Use custom logic (as str)
script2 = smtlibscript_from_formula(f, logic="BOOL")
outstream = StringIO()
script2.serialize(outstream)
output = outstream.getvalue()
self.assertIn("(set-logic BOOL)", output)
# Use custom logic (as Logic obj)
script3 = smtlibscript_from_formula(f, logic=QF_UFLIRA)
outstream = StringIO()
script3.serialize(outstream)
output = outstream.getvalue()
self.assertIn("(set-logic QF_UFLIRA)", output)
# Custom logic must be a Logic or Str
with self.assertRaises(UndefinedLogicError):
smtlibscript_from_formula(f, logic=4)
def test_get_strict_formula(self):
smtlib_single = """
(set-logic UFLIRA)
(declare-fun x () Bool)
(declare-fun y () Bool)
(declare-fun r () Real)
(assert (> r 0.0))
(assert x)
(check-sat)
"""
smtlib_double = smtlib_single + """
(assert (not y))
(check-sat)
"""
r = Symbol("r", REAL)
x, y = Symbol("x"), Symbol("y")
target_one = And(GT(r, Real(0)), x)
target_two = And(GT(r, Real(0)), x, Not(y))
stream_in = StringIO(smtlib_single)
f = get_formula(stream_in)
self.assertEqual(f, target_one)
stream_in = StringIO(smtlib_double)
f = get_formula(stream_in)
self.assertEqual(f, target_two)
stream_in = StringIO(smtlib_double)
with self.assertRaises(PysmtValueError):
f = get_formula_strict(stream_in)
def test_define_funs_same_args(self):
# n is defined once as an Int and once as a Real
smtlib_script = "\n".join(['(define-fun f ((n Int)) Int n)', '(define-fun f ((n Real)) Real n)'])
stream = StringIO(smtlib_script)
parser = SmtLibParser()
_ = parser.get_script(stream)
# No exceptions are thrown
self.assertTrue(True)
def test_define_funs_arg_and_fun(self):
smtlib_script = "\n".join(['(define-fun f ((n Int)) Int n)', '(declare-fun n () Real)'])
stream = StringIO(smtlib_script)
parser = SmtLibParser()
_ = parser.get_script(stream)
# No exceptions are thrown
self.assertTrue(True)
def test_define_fun_serialize_complex_type(self):
smtlib_script = '(define-fun f ((var (_ BitVec 32))) (_ BitVec 32) var)'
stream = StringIO(smtlib_script)
parser = SmtLibParser()
script = parser.get_script(stream)
# No exceptions are thrown
self.assertEqual(smtlib_script.replace('var', '__var0'), script.commands[0].serialize_to_string())
def test_evaluate_command(self):
class SmtLibIgnore(SmtLibIgnoreMixin):
pass
mock = SmtLibIgnore()
for cmd_name in [ smtcmd.SET_INFO,
smtcmd.ASSERT,
smtcmd.CHECK_SAT,
smtcmd.EXIT,
smtcmd.SET_LOGIC,
smtcmd.DECLARE_CONST,
smtcmd.PUSH,
smtcmd.POP]:
evaluate_command(SmtLibCommand(cmd_name, [None, None]),
solver=mock)
evaluate_command(SmtLibCommand(smtcmd.DECLARE_FUN,
[None, None, None]),
solver=mock)
evaluate_command(SmtLibCommand(smtcmd.DEFINE_FUN,
[None, None, None, None]),
solver=mock)
def test_smtlibignore_mixin(self):
"""In SmtLibIgnoreMixin, all SMT-LIB methods return None."""
class SmtLibIgnore(SmtLibIgnoreMixin):
pass
solver = SmtLibIgnore()
self.assertIsNone(solver.set_logic(None))
self.assertIsNone(solver.declare_fun(None))
self.assertIsNone(solver.declare_const(None))
self.assertIsNone(solver.define_fun(None, None, None, None))
self.assertIsNone(solver.declare_sort(None, None))
self.assertIsNone(solver.define_sort(None, None, None))
self.assertIsNone(solver.assert_(None))
self.assertIsNone(solver.get_assertions())
self.assertIsNone(solver.check_sat())
self.assertIsNone(solver.get_proof())
self.assertIsNone(solver.get_unsat_core())
self.assertIsNone(solver.get_values(None))
self.assertIsNone(solver.get_assignment())
self.assertIsNone(solver.push())
self.assertIsNone(solver.pop())
self.assertIsNone(solver.get_option(None))
self.assertIsNone(solver.set_option(None, None))
self.assertIsNone(solver.get_info(None))
self.assertIsNone(solver.set_info(None, None))
self.assertIsNone(solver.exit())
def test_all_parsing(self):
# Create a small file that tests all commands of smt-lib 2
parser = SmtLibParser()
nie = 0
for cmd in DEMO_SMTSCRIPT:
try:
next(parser.get_command_generator(StringIO(cmd)))
except NotImplementedError:
nie += 1
# There are currently 3 not-implemented commands
self.assertEqual(nie, 3)
DEMO_SMTSCRIPT = [ "(declare-fun a () Bool)",
"(declare-fun b () Bool)",
"(declare-fun c () Bool)",
"(assert true)",
"(assert (not a))",
"(check-sat)",
"(check-sat-assuming (a b c))",
"(check-sat-assuming ((not a) b (not c)))",
"(declare-const d Bool)",
"(declare-fun abc () Int)",
"(declare-sort A 0)",
"(declare-sort B 0)",
"(declare-sort C 0)",
"(declare-sort D 1)",
"(define-sort E () (D Int))",
"(declare-sort F 2)",
"(define-sort G (H) (F Int H))",
"(declare-fun e () B)",
"(define-fun f ((a Bool)) B e)",
"(define-fun g ((a Bool)) B (f a))",
"(define-fun h ((a Int)) Int a)",
"(declare-const x Bool)",
"(declare-const y Int)",
"(assert (= (h y) y))",
"(assert (= (f x) x))",
"(check-sat)",
"(define-fun-rec f ((a A)) B a)",
"(define-fun-rec g ((a A)) B (g a))",
"""(define-funs-rec ((h ((a A)) B) (i ((a A)) B) )
( (i a) (h a))
)
""",
"(define-sort A () B)",
"(define-sort A (B C) (Array B C))",
"(echo \"hello world\")",
"(exit)",
"(get-assertions)",
"(get-assignment)",
"(get-info :name)",
"(get-model)",
"(get-option :keyword)",
"(get-proof)",
"(get-unsat-assumptions)",
"(get-unsat-core)",
"(get-value (x y z))",
"(pop 42)",
"(push 42)",
"(reset)",
"(reset-assertions)",
"(set-info :number 42)",
"(set-logic QF_LIA)",
"(set-option :produce-models true)",
]
if __name__ == "__main__":
main()
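

# A minimal round-trip sketch of the API exercised above (assumes pysmt is
# installed); it is not invoked by the test suite. Build a formula, serialize
# it as an SMT-LIB script, then parse it back.
def _roundtrip_sketch():
    from io import StringIO
    from pysmt.shortcuts import Symbol, And
    from pysmt.smtlib.script import smtlibscript_from_formula
    from pysmt.smtlib.parser import get_formula

    f = And(Symbol("x"), Symbol("y"))
    buf = StringIO()
    smtlibscript_from_formula(f).serialize(buf)
    # get_formula returns the conjunction of the asserted formulas, i.e. f here.
    return get_formula(StringIO(buf.getvalue()))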
|
0f103936804d37ef51767e756df9631d7fb2925b
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/external/wpt/dom/nodes/Document-createElement-namespace-tests/generate.py
|
20c866bee80b94d36863cbd724a7e4abfc1ff635
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
generate.py
|
#!/usr/bin/python
import os
import sys
THIS_NAME = u"generate.py"
# Note: these lists must be kept in sync with the lists in
# Document-createElement-namespace.html, and this script must be run whenever
# the lists are updated. (We could keep the lists in a shared JSON file, but
# seems like too much effort.)
FILES = (
(u"empty", u""),
(u"minimal_html", u"<!doctype html><title></title>"),
(u"xhtml", u'<html xmlns="http://www.w3.org/1999/xhtml"></html>'),
(u"svg", u'<svg xmlns="http://www.w3.org/2000/svg"></svg>'),
(u"mathml", u'<mathml xmlns="http://www.w3.org/1998/Math/MathML"></mathml>'),
(u"bare_xhtml", u"<html></html>"),
(u"bare_svg", u"<svg></svg>"),
(u"bare_mathml", u"<math></math>"),
(u"xhtml_ns_removed", u"""\
<html xmlns="http://www.w3.org/1999/xhtml">
<head><script>
var newRoot = document.createElementNS(null, "html");
document.removeChild(document.documentElement);
document.appendChild(newRoot);
</script></head>
</html>
"""),
(u"xhtml_ns_changed", u"""\
<html xmlns="http://www.w3.org/1999/xhtml">
<head><script>
var newRoot = document.createElementNS("http://www.w3.org/2000/svg", "abc");
document.removeChild(document.documentElement);
document.appendChild(newRoot);
</script></head>
</html>
"""),
)
EXTENSIONS = (
u"html",
u"xhtml",
u"xml",
u"svg",
# Was not able to get server MIME type working properly :(
#"mml",
)
def __main__():
if len(sys.argv) > 1:
print(u"No arguments expected, aborting")
return
if not os.access(THIS_NAME, os.F_OK):
print(u"Must be run from the directory of " + THIS_NAME + u", aborting")
return
for name in os.listdir(u"."):
if name == THIS_NAME:
continue
os.remove(name)
manifest = open(u"MANIFEST", u"w")
for name, contents in FILES:
for extension in EXTENSIONS:
f = open(name + u"." + extension, u"w")
f.write(contents)
f.close()
manifest.write(u"support " + name + u"." + extension + u"\n")
manifest.close()
__main__()
|
253ca8f8b8e17afb49e675429cae8e0ca2681fca
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/core/azure-core/azure/core/pipeline/policies/_sensitive_header_cleanup_policy.py
|
c883bb2bea0f58e04651e9c9236ce2ee366bee4a
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,632
|
py
|
_sensitive_header_cleanup_policy.py
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from typing import List, Optional, Any, TypeVar
from azure.core.pipeline import PipelineRequest
from azure.core.pipeline.transport import HttpRequest as LegacyHttpRequest, HttpResponse as LegacyHttpResponse
from azure.core.rest import HttpRequest, HttpResponse
from ._base import SansIOHTTPPolicy
HTTPResponseType = TypeVar("HTTPResponseType", HttpResponse, LegacyHttpResponse)
HTTPRequestType = TypeVar("HTTPRequestType", HttpRequest, LegacyHttpRequest)
class SensitiveHeaderCleanupPolicy(SansIOHTTPPolicy[HTTPRequestType, HTTPResponseType]):
"""A simple policy that cleans up sensitive headers
:keyword list[str] blocked_redirect_headers: The headers to clean up when redirecting to another domain.
    :keyword bool disable_redirect_cleanup: Opt out of cleaning up sensitive headers when redirecting to another domain.
"""
DEFAULT_SENSITIVE_HEADERS = set(
[
"Authorization",
"x-ms-authorization-auxiliary",
]
)
def __init__(
self, # pylint: disable=unused-argument
*,
blocked_redirect_headers: Optional[List[str]] = None,
disable_redirect_cleanup: bool = False,
**kwargs: Any
) -> None:
self._disable_redirect_cleanup = disable_redirect_cleanup
self._blocked_redirect_headers = (
SensitiveHeaderCleanupPolicy.DEFAULT_SENSITIVE_HEADERS
if blocked_redirect_headers is None
else blocked_redirect_headers
)
def on_request(self, request: PipelineRequest[HTTPRequestType]) -> None:
"""This is executed before sending the request to the next policy.
:param request: The PipelineRequest object.
:type request: ~azure.core.pipeline.PipelineRequest
"""
# "insecure_domain_change" is used to indicate that a redirect
# has occurred to a different domain. This tells the SensitiveHeaderCleanupPolicy
# to clean up sensitive headers. We need to remove it before sending the request
# to the transport layer.
insecure_domain_change = request.context.options.pop("insecure_domain_change", False)
if not self._disable_redirect_cleanup and insecure_domain_change:
for header in self._blocked_redirect_headers:
request.http_request.headers.pop(header, None)
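

# A minimal usage sketch; the header names below are illustrative and not part
# of this module. The policy is constructed like any other SansIOHTTPPolicy and
# placed in a pipeline's policy list so that it runs before the transport.
def _usage_sketch():
    policy = SensitiveHeaderCleanupPolicy(
        blocked_redirect_headers=["Authorization", "x-custom-secret-header"]
    )
    # A pipeline assembled elsewhere would list it alongside the other
    # per-request policies, e.g. policies=[user_agent_policy, policy, ...].
    return policy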
|
318e777de39f7becee73a1ce1889dade9a3e792f
|
63425f441348aa67d45dbab00a134fc604e18a4e
|
/python_utils/pycaffe_utils.py
|
9ce1d6a07fc8795f52d51f1b6194fbab5200e934
|
[
"MIT"
] |
permissive
|
s-gupta/fast-rcnn
|
079cbc39c7ca90a342b5d91626de9db1c7dc9de9
|
da935a12be8078c4d8684100016f82b5a4b02afc
|
refs/heads/distillation
| 2020-04-05T10:16:55.800004
| 2017-03-03T00:10:55
| 2017-03-03T00:10:55
| 42,637,609
| 104
| 38
| null | 2017-01-13T10:16:43
| 2015-09-17T05:52:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,053
|
py
|
pycaffe_utils.py
|
# ---------------------------------------------------------
# Copyright (c) 2015, Saurabh Gupta
#
# Licensed under The MIT License [see LICENSE for details]
# ---------------------------------------------------------
import caffe, yaml
def net_surgery(net, json_file_or_dict):
# Load the JSON file
if isinstance(json_file_or_dict, str):
with open(json_file_or_dict, 'rt') as f:
source_description = yaml.load(f)
else:
source_description = json_file_or_dict
# Find a list of blobs in the target net
target_blobs = net.params.keys()
target_blobs = dict(zip(target_blobs, [0]*len(target_blobs)))
# For each item in the json file load the network and copy the layers
for src_desc in source_description:
# caffe.set_logging_level(1)
net_source = caffe.Net(src_desc['prototxt'], src_desc['model'], caffe.TEST)
# caffe.set_logging_level(0)
for j in xrange(len(src_desc['copy_ops']['dest'])):
dest_name = src_desc['copy_ops']['dest'][j]
assert dest_name in target_blobs, \
'Destination name {} not in target network blobs'.format(dest_name)
src_name = src_desc['copy_ops']['source'][j]
assert src_name in net_source.params.keys(), \
'Source name {} not in source network blobs'.format(src_name)
allow_different_shape = src_desc['copy_ops']['reshape'][j]
if target_blobs[dest_name] is not 0:
print 'Target blob {} is being reassigned'.format(dest_name)
target_blobs[dest_name] = target_blobs[dest_name] + 1
assert(len(net.params[dest_name]) == \
len(net_source.params[src_name])), \
'Number of blobs in {} in source do not match number of blobs in {} in destination'\
.format(src_name, dest_name)
for k in xrange(len(net.params[dest_name])):
src = net_source.params[src_name][k]
dest = net.params[dest_name][k]
if allow_different_shape:
assert(src.count == dest.count), \
'Count of blobs in {}[{:d}] in source do not match count of blobs in {}[{:d}] in destination'\
.format(src_name, k, dest_name, k)
dest.data[...] = src.data.reshape(dest.data.shape)
else:
src_shape = src.data.shape
dest_shape = dest.data.shape
assert(src_shape == dest_shape), \
'Shape of blobs in {}[{:d}] {} in source do not match shape of blobs in {}[{:d}] {} in destination'\
.format(src_name, k, str(src_shape), dest_name, k, str(dest_shape))
dest.data[...] = src.data
unusual = [x for x in target_blobs.keys() if target_blobs[x] is not 1]
for x in unusual:
print 'Parameter blob {} copied {:d} times.'.format(x, target_blobs[x])
return target_blobs
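
# A minimal sketch of the copy description expected by net_surgery(); all file
# and layer names below are placeholders. Each entry names a source network and
# the blob-copy operations to apply to the target net:
#
#   example_description = [{
#       'prototxt': 'source_net.prototxt',
#       'model': 'source_net.caffemodel',
#       'copy_ops': {
#           'source': ['conv1', 'fc6'],     # param names in the source net
#           'dest': ['conv1', 'fc6_new'],   # param names in the target net
#           'reshape': [False, True],       # whether reshaping is allowed per copy
#       },
#   }]
#   net_surgery(target_net, example_description)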
|
e1ecd285c75b4958abdf6e5c33c0010dc0050371
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/office365/onedrive/columns/validation.py
|
1c27c739a66ee690047a47ddc8970fc05a8aacf5
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 832
|
py
|
validation.py
|
from office365.onedrive.columns.display_name_localization import DisplayNameLocalization
from office365.runtime.client_value import ClientValue
from office365.runtime.client_value_collection import ClientValueCollection
class ColumnValidation(ClientValue):
"""Represents properties that validates column values."""
def __init__(self, formula=None, descriptions=None, default_language=None):
"""
:param str formula: The formula to validate column value.
:param list[DisplayNameLocalization] descriptions: The formula to validate column value.
:param str default_language: The formula to validate column value.
"""
self.formula = formula
self.descriptions = ClientValueCollection(DisplayNameLocalization, descriptions)
self.defaultLanguage = default_language
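

# A minimal construction sketch; the formula text is illustrative only and the
# keyword names mirror the __init__ signature above.
def _usage_sketch():
    return ColumnValidation(formula="=[Quantity] > 0", default_language="en-us")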
|
c7f44e93a2a6c5d91ce4c533cc5d8efa3922ac24
|
d30855895ee0c6ddaef493039dd0e0f1298eeae6
|
/tests/valai_tests.py
|
b7f168b6dfe2df57f62ad120238a142e1931e6fc
|
[
"GPL-1.0-or-later",
"GPL-3.0-only",
"MIT"
] |
permissive
|
Ezhil-Language-Foundation/open-tamil
|
f5f28463bff4400aa131b4a428e8f3e17aa63997
|
8ea745440f96fe587cf0959d12e990ad7923e60e
|
refs/heads/main
| 2022-12-23T13:50:19.758812
| 2022-12-16T21:56:02
| 2022-12-16T21:56:02
| 14,263,826
| 246
| 72
|
MIT
| 2022-09-24T17:49:10
| 2013-11-09T19:48:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
valai_tests.py
|
# -*- coding: utf-8 -*-
# (C) 2016-17 Muthiah Annamalai
import codecs
import os
from pprint import pprint
from opentamiltests import *
from spell import Speller, LoadDictionary, OttruSplit, Mayangoli, ASpell
from valai import solthiruthi as tamilpesu
from valai import vaani
CURRDIR = os.path.dirname(os.path.abspath(__file__))
class TamilpesuTest(unittest.TestCase):
def test_basic(self):
tp = tamilpesu.SpellChecker()
try:
options = tp.check_word(u"வாணி என்பது ஒரு")
except Exception as e:
return
self.assertEqual(len(options), 1)
self.assertGreaterEqual(len(options[0].alternatives), 2)
def test_aspell_parse_fmt(self):
results = {}
with codecs.open(os.path.join(CURRDIR, "data", "aspell.out"), "r", "utf-8") as fp:
data = fp.read()
ASpell.parse_result(results, data)
self.assertEqual(len(results), 15)
self.assertTrue("செய்வது" in results)
self.assertEqual(len(results["செய்வது"]), 19)
miss = ["செய்து", "செய்தி", "நெய்து", "பெய்து", "செய்", "ஆய்வு", "உய்வு", "எய்து", "ஓய்வு", "சத்து", "செய்ய",
"யுவதி", "தேய்வு", "தொய்வு", "பெய்வி", "வாய்வு", "வயது", "யாது", "தயவு"]
self.assertListEqual(results["செய்வது"], miss)
if __name__ == "__main__":
unittest.main()
|
968d2c886a4babbbd9b0db61f34f4a40650ca860
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/stripe/stripe/stripe_object.pyi
|
6b93f44c7ac7181d28d6eee261e61b82b92f337d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 1,913
|
pyi
|
stripe_object.pyi
|
import json
from _typeshed import Self
from typing import Any
from stripe import api_requestor as api_requestor
class StripeObject(dict[Any, Any]):
class ReprJSONEncoder(json.JSONEncoder):
def default(self, obj): ...
def __init__(
self,
id: Any | None = ...,
api_key: Any | None = ...,
stripe_version: Any | None = ...,
stripe_account: Any | None = ...,
last_response: Any | None = ...,
**params,
) -> None: ...
@property
def last_response(self): ...
def update(self, update_dict): ...
def __setattr__(self, k, v): ...
def __getattr__(self, k): ...
def __delattr__(self, k): ...
def __setitem__(self, k, v) -> None: ...
def __getitem__(self, k): ...
def __delitem__(self, k) -> None: ...
def __reduce__(self): ...
@classmethod
def construct_from(
cls: type[Self],
values: Any,
key: str | None,
stripe_version: Any | None = ...,
stripe_account: Any | None = ...,
last_response: Any | None = ...,
) -> Self: ...
api_key: Any
stripe_version: Any
stripe_account: Any
def refresh_from(
self,
values: Any,
api_key: Any | None = ...,
partial: bool = ...,
stripe_version: Any | None = ...,
stripe_account: Any | None = ...,
last_response: Any | None = ...,
) -> None: ...
@classmethod
def api_base(cls) -> None: ...
def request(self, method, url, params: Any | None = ..., headers: Any | None = ...): ...
def request_stream(self, method, url, params: Any | None = ..., headers: Any | None = ...): ...
def to_dict(self): ...
def to_dict_recursive(self): ...
@property
def stripe_id(self): ...
def serialize(self, previous): ...
def __copy__(self) -> StripeObject: ...
def __deepcopy__(self, memo: Any) -> StripeObject: ...
|
89f1a0907ebe251861ea698c2a4c9073a836c664
|
426a68c76101cbaaf2b3e1d6243a4792694717a3
|
/extract_feats/opensmile.py
|
f51e98cff2ef3c053014bd11caf46ec5a8213fc5
|
[
"MIT"
] |
permissive
|
Renovamen/Speech-Emotion-Recognition
|
7e7e42af1ffdd5d49941cd9ab2451f23bdb64eba
|
64e2f48be4ed0ed3fba3f85af824209f6c5c3884
|
refs/heads/master
| 2023-04-08T21:18:31.169988
| 2022-04-16T18:46:02
| 2022-04-16T18:51:32
| 180,302,915
| 710
| 184
|
MIT
| 2023-03-25T01:20:47
| 2019-04-09T06:47:32
|
Python
|
UTF-8
|
Python
| false
| false
| 5,505
|
py
|
opensmile.py
|
import os
import csv
import sys
from typing import Tuple, Union
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import joblib
import utils
# Number of features in each feature set
FEATURE_NUM = {
'IS09_emotion': 384,
'IS10_paraling': 1582,
'IS11_speaker_state': 4368,
'IS12_speaker_trait': 6125,
'IS13_ComParE': 6373,
'ComParE_2016': 6373
}
def get_feature_opensmile(config, filepath: str) -> list:
"""
用 Opensmile 提取一个音频的特征
Args:
config: 配置项
file_path (str): 音频路径
Returns:
vector (list): 该音频的特征向量
"""
    # Project base directory
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
    # Path of single_feature.csv
single_feat_path = os.path.join(BASE_DIR, config.feature_folder, 'single_feature.csv')
    # Path of the openSMILE config file
opensmile_config_path = os.path.join(config.opensmile_path, 'config', config.opensmile_config + '.conf')
    # openSMILE command line
cmd = 'cd ' + config.opensmile_path + ' && ./SMILExtract -C ' + opensmile_config_path + ' -I ' + filepath + ' -O ' + single_feat_path + ' -appendarff 0'
print("Opensmile cmd: ", cmd)
os.system(cmd)
reader = csv.reader(open(single_feat_path,'r'))
rows = [row for row in reader]
last_line = rows[-1]
return last_line[1: FEATURE_NUM[config.opensmile_config] + 1]
def load_feature(config, train: bool) -> Union[Tuple[np.ndarray], np.ndarray]:
"""
从 "{config.feature_folder}/*.csv" 文件中加载特征数据
Args:
config: 配置项
train (bool): 是否为训练数据
Returns:
- X (Tuple[np.ndarray]): 训练特征、测试特征和对应的标签
- X (np.ndarray): 预测特征
"""
    feature_path = os.path.join(config.feature_folder, "train.csv" if train else "predict.csv")
    # Load the feature data
df = pd.read_csv(feature_path)
features = [str(i) for i in range(1, FEATURE_NUM[config.opensmile_config] + 1)]
X = df.loc[:,features].values
Y = df.loc[:,'label'].values
    # Path of the saved scaler (standardization model)
scaler_path = os.path.join(config.checkpoint_path, 'SCALER_OPENSMILE.m')
    if train:
        # Standardize the data
        scaler = StandardScaler().fit(X)
        # Save the fitted scaler
utils.mkdirs(config.checkpoint_path)
joblib.dump(scaler, scaler_path)
X = scaler.transform(X)
        # Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)
return x_train, x_test, y_train, y_test
else:
        # Standardize the data
        # Load the saved scaler
scaler = joblib.load(scaler_path)
X = scaler.transform(X)
return X
def get_data(config, data_path: str, train: bool) -> Union[Tuple[np.ndarray], np.ndarray]:
"""
用 Opensmile 提取所有音频的特征: 遍历所有文件夹, 读取每个文件夹中的音频, 提取每个音频的
特征,把所有特征保存在 "{config.feature_path}/*.csv" 文件中。
Args:
config: 配置项
data_path (str): 数据集文件夹 / 测试文件路径
train (bool): 是否为训练数据
Returns:
- train = True: 训练特征、测试特征和对应的标签
- train = False: 预测特征
"""
    # Create the config.feature_folder directory if it does not exist
utils.mkdirs(config.feature_folder)
    # Path where the features are stored
    feature_path = os.path.join(config.feature_folder, "train.csv" if train else "predict.csv")
    # Write the csv header
writer = csv.writer(open(feature_path, 'w'))
first_row = ['label']
for i in range(1, FEATURE_NUM[config.opensmile_config] + 1):
first_row.append(str(i))
writer.writerow(first_row)
writer = csv.writer(open(feature_path, 'a+'))
print('Opensmile extracting...')
    if train:
cur_dir = os.getcwd()
sys.stderr.write('Curdir: %s\n' % cur_dir)
os.chdir(data_path)
        # Walk through the class folders
for i, directory in enumerate(config.class_labels):
sys.stderr.write("Started reading folder %s\n" % directory)
os.chdir(directory)
# label_name = directory
label = config.class_labels.index(directory)
            # Read the audio files in this folder
for filename in os.listdir('.'):
if not filename.endswith('wav'):
continue
filepath = os.path.join(os.getcwd(), filename)
                # Extract the features of this audio file
feature_vector = get_feature_opensmile(config, filepath)
feature_vector.insert(0, label)
                # Collect every audio file's features into one csv file
writer.writerow(feature_vector)
sys.stderr.write("Ended reading folder %s\n" % directory)
os.chdir('..')
os.chdir(cur_dir)
else:
feature_vector = get_feature_opensmile(config, data_path)
feature_vector.insert(0, '-1')
writer.writerow(feature_vector)
print('Opensmile extract done.')
    # Temporary workaround for an unexplained bug:
    # prediction features of feature sets other than IS10_paraling cannot be
    # loaded directly here, for reasons that remain unclear.
    if train:
return load_feature(config, train=train)
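

# A minimal sketch of the configuration object this module expects; every
# attribute below is read somewhere above, but the paths and labels are
# placeholders.
def _config_sketch():
    from types import SimpleNamespace
    config = SimpleNamespace(
        opensmile_path="/opt/opensmile",           # openSMILE install dir (contains ./SMILExtract)
        opensmile_config="IS10_paraling",          # one of the FEATURE_NUM keys
        feature_folder="features",                 # where train.csv / predict.csv are written
        checkpoint_path="checkpoints",             # where the fitted scaler is stored
        class_labels=["angry", "happy", "neutral", "sad"],  # one sub-folder per label
    )
    # x_train, x_test, y_train, y_test = get_data(config, "datasets/train", train=True)
    return config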
|
c35e894b0a7f568b91481535598eb2ea50bfc95b
|
f241df59f8e6c13cab13ec3b5d5d9ade89c419f7
|
/leo/external/npyscreen/apNPSApplication.py
|
24000f238aa091d13e658797e102a7acf24c885e
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"MIT"
] |
permissive
|
leo-editor/leo-editor
|
6c6e09c1ae89cb9b1952c9f5b0c3a6c76ae9e625
|
a3f6c3ebda805dc40cd93123948f153a26eccee5
|
refs/heads/devel
| 2023-08-28T08:57:01.365701
| 2023-08-23T10:21:57
| 2023-08-23T10:21:57
| 16,728,437
| 1,671
| 219
|
NOASSERTION
| 2023-09-14T19:39:01
| 2014-02-11T11:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
apNPSApplication.py
|
#@+leo-ver=5-thin
#@+node:ekr.20170428084207.3: * @file ../external/npyscreen/apNPSApplication.py
#!/usr/bin/env python
from leo.core import leoGlobals as g
assert g
#@+others
#@+node:ekr.20170428084207.4: ** Declarations
import curses
# import locale
# import _curses
from . import npyssafewrapper
#@+node:ekr.20170428084207.5: ** class AlreadyOver
class AlreadyOver(Exception):
pass
#@+node:ekr.20170428084207.6: ** class NPSApp
class NPSApp:
_run_called = 0
#@+others
#@+node:ekr.20170428084207.7: *3* main
def main(self):
"""Overload this method to create your application"""
#@+node:ekr.20170428084207.8: *3* resize
def resize(self):
pass
#@+node:ekr.20170428084207.9: *3* __remove_argument_call_main
def __remove_argument_call_main(self, screen, enable_mouse=True):
        # screen discarded.
if enable_mouse:
curses.mousemask(curses.ALL_MOUSE_EVENTS)
del screen
return self.main()
#@+node:ekr.20170428084207.10: *3* NPS.run
def run(self, fork=None):
"""Run application. Calls Mainloop wrapped properly."""
# g.trace('===== (NPS) fork:', repr(fork))
if fork is None:
return npyssafewrapper.wrapper(self.__remove_argument_call_main)
else:
return npyssafewrapper.wrapper(self.__remove_argument_call_main, fork=fork)
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@-leo
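
# A minimal subclass sketch (not part of the leo sources): NPSApp is used by
# overriding main() and calling run(), which executes main() inside the curses
# wrapper.
#
#     class HelloApp(NPSApp):
#         def main(self):
#             # build forms and widgets here; this runs inside curses
#             pass
#
#     HelloApp().run()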
|
6dcb89e00e3105cc88125cb45f76f10c5295ba89
|
9f84d91a8ae3df53b07fe3267992fba00a99ac9e
|
/examples/super_gat.py
|
400d6098530639fcecc0e94edf539bf2330de5d1
|
[
"MIT"
] |
permissive
|
pyg-team/pytorch_geometric
|
ebea601eae228f3905465b5c2349d3fb3bb5cb26
|
a52af694b8ce6a80811e20966fe6d08a3e7511fe
|
refs/heads/master
| 2023-08-31T04:13:40.943308
| 2023-08-30T12:48:42
| 2023-08-30T12:48:42
| 106,024,057
| 6,775
| 1,563
|
MIT
| 2023-09-14T17:10:18
| 2017-10-06T16:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
super_gat.py
|
import os.path as osp
import time
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import SuperGATConv
dataset = 'Cora'
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)
dataset = Planetoid(path, dataset, transform=T.NormalizeFeatures())
data = dataset[0]
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = SuperGATConv(dataset.num_features, 8, heads=8,
dropout=0.6, attention_type='MX',
edge_sample_ratio=0.8, is_undirected=True)
self.conv2 = SuperGATConv(8 * 8, dataset.num_classes, heads=8,
concat=False, dropout=0.6,
attention_type='MX', edge_sample_ratio=0.8,
is_undirected=True)
def forward(self, x, edge_index):
x = F.dropout(x, p=0.6, training=self.training)
x = F.elu(self.conv1(x, edge_index))
att_loss = self.conv1.get_attention_loss()
x = F.dropout(x, p=0.6, training=self.training)
        x = self.conv2(x, edge_index)
att_loss += self.conv2.get_attention_loss()
return F.log_softmax(x, dim=-1), att_loss
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model, data = Net().to(device), data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)
def train(data):
model.train()
optimizer.zero_grad()
out, att_loss = model(data.x, data.edge_index)
loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
loss += 4.0 * att_loss
loss.backward()
optimizer.step()
@torch.no_grad()
def test(data):
model.eval()
out, accs = model(data.x, data.edge_index)[0], []
for _, mask in data('train_mask', 'val_mask', 'test_mask'):
pred = out[mask].argmax(1)
acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
times = []
for epoch in range(1, 501):
start = time.time()
train(data)
train_acc, val_acc, test_acc = test(data)
print(f'Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, '
f'Test: {test_acc:.4f}')
times.append(time.time() - start)
print(f"Median time per epoch: {torch.tensor(times).median():.4f}s")
|
69328638e3dff03e12dd9d82f62fd0b3a2f253de
|
8e907b84175c08397b7ea0eea3c7aa5f90a58f28
|
/aligulac/simul/formats/teampl.py
|
4f52d984e91d73f7d806dd29dfc1c3a85467093a
|
[] |
no_license
|
TheBB/aligulac
|
7e5c081b608278368563650307a521691211ef05
|
4b18637a6efb898821f15beac9947551aca2a424
|
refs/heads/master
| 2022-12-12T05:16:10.186091
| 2022-02-17T06:29:31
| 2022-04-20T11:24:29
| 7,734,890
| 212
| 31
| null | 2022-12-08T05:19:48
| 2013-01-21T15:38:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
teampl.py
|
from simul.formats.match import Match
class Tally:
def __init__(self, rounds):
self.finishes = [0] * rounds
self.win, self.loss = 0.0, 0.0
def __getitem__(self, key):
return self.finishes[key]
def __setitem__(self, key, value):
self.finishes[key] = value
def __len__(self):
return len(self.finishes)
def __iter__(self):
return iter(self.finishes)
def scale(self, scale):
self.finishes = [f/scale for f in self.finishes]
class TeamPL:
def __init__(self, num):
self._num = num
def set_players(self, players):
self._pla = players[:len(players)//2]
self._plb = players[len(players)//2:]
self._nplayers = len(self._pla)
self._numw = self._nplayers//2 + 1
self._nums = (self._nplayers - 1)//2
self._matches = []
for i in range(0,self._nplayers):
m = Match(self._num)
m.set_players([self._pla[i], self._plb[i]])
self._matches.append(m)
def get_match(self, i):
return self._matches[int(i)]
def get_tally(self):
return self._tally
def compute(self, N=1000):
self._tally = [Tally(self._nplayers+1), Tally(self._nplayers+1)]
for m in self._matches:
m.compute()
for i in range(0,N):
self.compute_inst(1.0/N)
def compute_inst(self, base):
sca, scb = 0, 0
for m in self._matches:
inst = m.random_instance_detail(new=True)
if inst[1] > inst[2]:
sca += 1
else:
scb += 1
if sca >= self._numw or scb >= self._numw:
break
self._tally[0][sca + max(self._nums - scb, 0)] += base
self._tally[1][scb + max(self._nums - sca, 0)] += base
if sca > scb:
self._tally[0].win += base
self._tally[1].loss += base
else:
self._tally[1].win += base
self._tally[0].loss += base
|
96ce201085a759607ef4cd15cf45361f7642af9c
|
bdf0d4d3aac186af3ad0ad6ac9f380f9a0573fba
|
/aries_cloudagent/core/plugin_registry.py
|
f286e2e4049f6e8f41c41a56c3b98fdb3a2a0d3e
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
hyperledger/aries-cloudagent-python
|
f25d961e0717a4d703bf43df3e4b4bc8ec07b908
|
39cac36d8937ce84a9307ce100aaefb8bc05ec04
|
refs/heads/main
| 2023-09-01T15:37:05.353674
| 2023-08-31T14:13:06
| 2023-08-31T14:13:06
| 193,556,007
| 370
| 530
|
Apache-2.0
| 2023-09-14T17:59:34
| 2019-06-24T18:12:14
|
Python
|
UTF-8
|
Python
| false
| false
| 14,676
|
py
|
plugin_registry.py
|
"""Handle registration of plugin modules for extending functionality."""
import logging
from collections import OrderedDict
from types import ModuleType
from typing import Sequence, Iterable
from ..config.injection_context import InjectionContext
from ..core.event_bus import EventBus
from ..utils.classloader import ClassLoader, ModuleLoadError
from .error import ProtocolDefinitionValidationError
from .protocol_registry import ProtocolRegistry
from .goal_code_registry import GoalCodeRegistry
LOGGER = logging.getLogger(__name__)
class PluginRegistry:
"""Plugin registry for indexing application plugins."""
def __init__(self, blocklist: Iterable[str] = []):
"""Initialize a `PluginRegistry` instance."""
self._plugins = OrderedDict()
self._blocklist = set(blocklist)
@property
def plugin_names(self) -> Sequence[str]:
"""Accessor for a list of all plugin modules."""
return list(self._plugins.keys())
@property
def plugins(self) -> Sequence[ModuleType]:
"""Accessor for a list of all plugin modules."""
return list(self._plugins.values())
def validate_version(self, version_list, module_name):
"""Validate version dict format."""
is_list = isinstance(version_list, list)
# Must be a list
if not is_list:
raise ProtocolDefinitionValidationError(
"Versions definition is not of type list"
)
# Must have at least one definition
if len(version_list) < 1:
raise ProtocolDefinitionValidationError(
"Versions list must define at least one version module"
)
if not all(isinstance(v, dict) for v in version_list):
raise ProtocolDefinitionValidationError(
"Element of versions definition list is not of type dict"
)
for version_dict in version_list:
# Dicts must have correct format
try:
if not (
isinstance(version_dict["major_version"], int)
and isinstance(version_dict["minimum_minor_version"], int)
and isinstance(version_dict["current_minor_version"], int)
and isinstance(version_dict["path"], str)
):
raise ProtocolDefinitionValidationError(
"Unexpected types in version definition"
)
except KeyError as e:
raise ProtocolDefinitionValidationError(
f"Element of versions definition list is missing an attribute: {e}"
)
# Version number cannot be negative
if (
version_dict["major_version"] < 0
or version_dict["minimum_minor_version"] < 0
or version_dict["current_minor_version"] < 0
):
raise ProtocolDefinitionValidationError(
"Version number cannot be negative"
)
            # Minimum minor version cannot be greater than the current minor version
if (
version_dict["minimum_minor_version"]
> version_dict["current_minor_version"]
):
raise ProtocolDefinitionValidationError(
"Minimum supported minor version cannot"
+ " be greater than current minor version"
)
# There can only be one definition per major version
major_version = version_dict["major_version"]
count = 0
for version_dict_outer in version_list:
if version_dict_outer["major_version"] == major_version:
count += 1
if count > 1:
raise ProtocolDefinitionValidationError(
"There can only be one definition per major version. "
+ f"Found {count} for major version {major_version}."
)
# Specified module must be loadable
version_path = version_dict["path"]
mod = ClassLoader.load_module(version_path, module_name)
if not mod:
raise ProtocolDefinitionValidationError(
"Version module path is not "
+ f"loadable: {module_name}, {version_path}"
)
return True
def register_plugin(self, module_name: str) -> ModuleType:
"""Register a plugin module."""
if module_name in self._plugins:
mod = self._plugins[module_name]
elif module_name in self._blocklist:
LOGGER.debug(f"Blocked {module_name} from loading due to blocklist")
return None
else:
try:
mod = ClassLoader.load_module(module_name)
LOGGER.debug(f"Loaded module: {module_name}")
except ModuleLoadError as e:
LOGGER.error(f"Error loading plugin module: {e}")
return None
# Module must exist
if not mod:
LOGGER.error(f"Module doesn't exist: {module_name}")
return None
# Any plugin with a setup method is considered valid.
if hasattr(mod, "setup"):
self._plugins[module_name] = mod
return mod
# Make an exception for non-protocol modules
# that contain admin routes and for old-style protocol
# modules without version support
routes = ClassLoader.load_module("routes", module_name)
message_types = ClassLoader.load_module("message_types", module_name)
if routes or message_types:
self._plugins[module_name] = mod
return mod
definition = ClassLoader.load_module("definition", module_name)
# definition.py must exist in protocol
if not definition:
LOGGER.error(f"Protocol does not include definition.py: {module_name}")
return None
# definition.py must include versions attribute
if not hasattr(definition, "versions"):
LOGGER.error(
"Protocol definition does not include "
f"versions attribute: {module_name}"
)
return None
# Definition list must not be malformed
try:
self.validate_version(definition.versions, module_name)
except ProtocolDefinitionValidationError as e:
LOGGER.error(f"Protocol versions definition is malformed. {e}")
return None
self._plugins[module_name] = mod
return mod
# # Load each version as a separate plugin
# for version in definition.versions:
# mod = ClassLoader.load_module(f"{module_name}.{version['path']}")
# self._plugins[module_name] = mod
# return mod
def register_package(self, package_name: str) -> Sequence[ModuleType]:
"""Register all modules (sub-packages) under a given package name."""
try:
module_names = ClassLoader.scan_subpackages(package_name)
except ModuleLoadError:
LOGGER.error("Plugin module package not found: %s", package_name)
module_names = []
return list(
filter(
None,
(
self.register_plugin(module_name)
for module_name in module_names
if module_name.split(".")[-1] != "tests"
),
)
)
async def init_context(self, context: InjectionContext):
"""Call plugin setup methods on the current context."""
for plugin in self._plugins.values():
if hasattr(plugin, "setup"):
await plugin.setup(context)
else:
await self.load_protocols(context, plugin)
        # register event handlers for each protocol, if provided
self.register_protocol_events(context)
async def load_protocol_version(
self,
context: InjectionContext,
mod: ModuleType,
version_definition: dict = None,
):
"""Load a particular protocol version."""
protocol_registry = context.inject(ProtocolRegistry)
goal_code_registry = context.inject(GoalCodeRegistry)
if hasattr(mod, "MESSAGE_TYPES"):
protocol_registry.register_message_types(
mod.MESSAGE_TYPES, version_definition=version_definition
)
if hasattr(mod, "CONTROLLERS"):
protocol_registry.register_controllers(
mod.CONTROLLERS, version_definition=version_definition
)
goal_code_registry.register_controllers(mod.CONTROLLERS)
async def load_protocols(self, context: InjectionContext, plugin: ModuleType):
"""For modules that don't implement setup, register protocols manually."""
# If this module contains message_types, then assume that
# this is a valid module of the old style (not versioned)
try:
mod = ClassLoader.load_module(plugin.__name__ + ".message_types")
except ModuleLoadError as e:
LOGGER.error("Error loading plugin module message types: %s", e)
return
if mod:
await self.load_protocol_version(context, mod)
else:
            # Otherwise, check for definition.py for versioned
# protocol packages
try:
definition = ClassLoader.load_module(plugin.__name__ + ".definition")
except ModuleLoadError as e:
LOGGER.error("Error loading plugin definition module: %s", e)
return
if definition:
for protocol_version in definition.versions:
try:
mod = ClassLoader.load_module(
f"{plugin.__name__}.{protocol_version['path']}"
+ ".message_types"
)
await self.load_protocol_version(context, mod, protocol_version)
except ModuleLoadError as e:
LOGGER.error("Error loading plugin module message types: %s", e)
return
async def register_admin_routes(self, app):
"""Call route registration methods on the current context."""
for plugin in self._plugins.values():
definition = ClassLoader.load_module("definition", plugin.__name__)
if definition:
# Load plugin routes that are in a versioned package.
for plugin_version in definition.versions:
try:
mod = ClassLoader.load_module(
f"{plugin.__name__}.{plugin_version['path']}.routes"
)
except ModuleLoadError as e:
LOGGER.error("Error loading admin routes: %s", e)
continue
if mod and hasattr(mod, "register"):
await mod.register(app)
else:
# Load plugin routes that aren't in a versioned package.
try:
mod = ClassLoader.load_module(f"{plugin.__name__}.routes")
except ModuleLoadError as e:
LOGGER.error("Error loading admin routes: %s", e)
continue
if mod and hasattr(mod, "register"):
await mod.register(app)
def register_protocol_events(self, context: InjectionContext):
"""Call route register_events methods on the current context."""
event_bus = context.inject_or(EventBus)
if not event_bus:
LOGGER.error("No event bus in context")
return
for plugin in self._plugins.values():
definition = ClassLoader.load_module("definition", plugin.__name__)
if definition:
# Load plugin routes that are in a versioned package.
for plugin_version in definition.versions:
try:
mod = ClassLoader.load_module(
f"{plugin.__name__}.{plugin_version['path']}.routes"
)
except ModuleLoadError as e:
LOGGER.error("Error loading admin routes: %s", e)
continue
if mod and hasattr(mod, "register_events"):
mod.register_events(event_bus)
else:
# Load plugin routes that aren't in a versioned package.
try:
mod = ClassLoader.load_module(f"{plugin.__name__}.routes")
except ModuleLoadError as e:
LOGGER.error("Error loading admin routes: %s", e)
continue
if mod and hasattr(mod, "register_events"):
mod.register_events(event_bus)
def post_process_routes(self, app):
"""Call route binary file response OpenAPI fixups if applicable."""
for plugin in self._plugins.values():
definition = ClassLoader.load_module("definition", plugin.__name__)
if definition:
# Set binary file responses for routes that are in a versioned package.
for plugin_version in definition.versions:
try:
mod = ClassLoader.load_module(
f"{plugin.__name__}.{plugin_version['path']}.routes"
)
except ModuleLoadError as e:
LOGGER.error("Error loading admin routes: %s", e)
continue
if mod and hasattr(mod, "post_process_routes"):
mod.post_process_routes(app)
else:
# Set binary file responses for routes not in a versioned package.
try:
mod = ClassLoader.load_module(f"{plugin.__name__}.routes")
except ModuleLoadError as e:
LOGGER.error("Error loading admin routes: %s", e)
continue
if mod and hasattr(mod, "post_process_routes"):
mod.post_process_routes(app)
def __repr__(self) -> str:
"""Return a string representation for this class."""
return "<{}>".format(self.__class__.__name__)
|
d502cc33eae57f42e90d698243e3b631e3ce10d4
|
0dbfceb20b5fdcb3410e34bd68a0e23e54d2fc7a
|
/omnizart/patch_cnn/inference.py
|
797b292fdc74dd313c352e10200d6ed10a884f79
|
[
"MIT"
] |
permissive
|
Music-and-Culture-Technology-Lab/omnizart
|
e626e2ca2a5efa7ccc4ae85898cb8e227d8ff18a
|
735c7f9f913611cbf7190d1f6fdbac93b14d49f7
|
refs/heads/master
| 2023-08-08T03:24:59.082617
| 2023-08-04T09:34:56
| 2023-08-04T09:34:56
| 290,497,530
| 1,455
| 94
|
MIT
| 2023-08-18T16:39:42
| 2020-08-26T13:01:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
inference.py
|
import numpy as np
def inference(pred, mapping, zzz, cenf, threshold=0.5, max_method="posterior"):
"""Infers pitch contour from the model prediction.
Parameters
----------
pred:
The predicted results of the model.
mapping: 2D numpy array
The original frequency and time index of patches.
See ``omnizart.feature.cfp.extract_patch_cfp`` for more details.
zzz: 2D numpy array
The original CFP feature.
cenf: list[float]
Center frequencies in Hz of each frequency index.
threshold: float
Threshold for filtering value of predictions.
max_method: {'posterior', 'prior'}
        The approach used to determine the frequency. The *posterior* method assigns the
        frequency value according to the given ``mapping`` parameter, while *prior*
        uses the given ``zzz`` feature for the determination.
Returns
-------
contour: 1D numpy array
        Sequence of frequencies in Hz, representing the inferred pitch contour.
"""
pred = pred[:, 1]
pred_idx = np.where(pred > threshold)
probs = np.expand_dims(pred[pred_idx[0]], axis=-1)
maps = mapping[pred_idx[0]]
maps = np.concatenate([maps, probs], axis=1)
maps = maps[maps[:, 1].argsort()]
contour = np.zeros(int(np.max(maps)) + 1)
for tidx in range(len(probs)):
candidate = maps[np.where(maps[:, 1] == tidx)[0]]
if len(candidate) < 1:
continue
if len(candidate) == 1:
contour[int(candidate[0, 1])] = candidate[0, 0]
else:
if max_method == "posterior":
freq_idx = np.where(candidate[:, 2] == np.max(candidate[:, 2]))[0]
elif max_method == "prior":
freq_idx = zzz[candidate[:, 0].astype('int'), tidx].argmax(axis=0)
else:
raise ValueError(f"Invalid maximum method: {max_method}")
freq_idx = int(freq_idx)
contour[int(candidate[freq_idx, 1])] = candidate[freq_idx, 0]
for idx, cont in enumerate(contour):
if cont > 1:
contour[idx] = cenf[int(cont)]
return contour
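

# A toy, self-contained sketch of the expected shapes; all numbers are
# illustrative. Three patches cover two frames, two of them pass the threshold,
# and the surviving frequency indices are mapped through ``cenf``.
def _toy_example():
    pred = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])       # column 1 = positive-class probability
    mapping = np.array([[5, 0], [3, 0], [7, 1]], dtype=float)   # (frequency index, time index) per patch
    zzz = np.ones((10, 2))                                       # dummy CFP feature
    cenf = [32.7 * 2 ** (i / 12) for i in range(10)]             # dummy center frequencies in Hz
    return inference(pred, mapping, zzz, cenf, threshold=0.5)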
|
11645d2419d81fe348df6c751d97d0773d4ff143
|
c71b7a8a9dd7bf7c9496b1df2acc1e52a2a913d0
|
/onadata/apps/logger/fields.py
|
3b9db6acba3c5460836d9f37898e2c1be97d2442
|
[
"BSD-2-Clause"
] |
permissive
|
kobotoolbox/kobocat
|
a5c6fb6a9d3dabe71b5e3c082e4261c4475cbf7f
|
b8d93d4da649f323af111cf7247206554be7c8b1
|
refs/heads/main
| 2023-08-10T00:05:49.384348
| 2023-07-06T04:47:59
| 2023-07-06T04:47:59
| 14,497,749
| 101
| 135
|
BSD-2-Clause
| 2023-09-13T14:57:13
| 2013-11-18T16:16:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
fields.py
|
# coding: utf-8
from collections.abc import Callable
from django.core.exceptions import FieldError
from django.db import models
class LazyDefaultBooleanField(models.PositiveSmallIntegerField):
"""
Allows specifying a default value for a new field without having to rewrite
every row in the corresponding table when migrating the database.
Whenever the database contains a null:
1. The field will present the default value instead of None;
2. The field will overwrite the null with the default value if the
instance it belongs to is saved.
models.BooleanField can't be nullable, so we use models.IntegerField to mimic
models.BooleanField behaviour
Based on `kpi.fields.LazyDefaultJSONBField`
"""
def __init__(self, *args, **kwargs):
if kwargs.get('null', False):
raise FieldError('Do not manually specify null=True for a '
'LazyDefaultBooleanField')
self.lazy_default = kwargs.get('default')
if self.lazy_default is None:
raise FieldError('LazyDefaultBooleanField requires a default that '
'is not None')
elif not isinstance(self.lazy_default, bool):
raise FieldError("LazyDefaultBooleanField requires the default value "
"to be a boolean")
kwargs['null'] = True
kwargs['default'] = None
super().__init__(*args, **kwargs)
def _get_lazy_default(self):
if isinstance(self.lazy_default, Callable):
return self.lazy_default()
else:
return self.lazy_default
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs['default'] = self.lazy_default
del kwargs['null']
return name, path, args, kwargs
def from_db_value(self, value, *args, **kwargs):
if value is None:
return self._get_lazy_default()
# We want to play with booleans on Python side.
return True if value == 1 else False
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname)
if value is None:
setattr(model_instance, self.attname, self._get_lazy_default())
value = self.__to_integer(self._get_lazy_default())
return value
def __to_integer(self, value):
# We want to play with integers on DB side.
return 1 if value is True else 0
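
# A minimal usage sketch; the model and field names are illustrative. The field
# is declared with a concrete default and never with null=True; rows holding
# NULL present the default until the instance is next saved.
#
#     class ExampleModel(models.Model):
#         downloadable = LazyDefaultBooleanField(default=True)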
|
5af8759b1e566379491b51de1f207aaa87f4ea3b
|
1cd58db96a3cdaa3162b0b73bc27c0a26393edb6
|
/tests/conftest.py
|
5b5feb5d35ae1f5cbdc44f54c28668405a8423db
|
[
"MIT"
] |
permissive
|
adamchainz/patchy
|
6cab92e51e6b3e6b3b266df97417679f4f8d9409
|
fd1570db651328e199252063b32e0cc745230733
|
refs/heads/main
| 2023-08-20T00:21:40.770764
| 2023-08-15T08:36:26
| 2023-08-15T08:36:26
| 35,655,230
| 164
| 15
|
MIT
| 2023-09-12T05:38:50
| 2015-05-15T05:25:18
|
Python
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
conftest.py
|
from __future__ import annotations
import pytest
import patchy.api
@pytest.fixture(autouse=True)
def clear_cache():
patchy.api._patching_cache.clear()
|
42f9bc2ffd1b7f71090ce3628c9e16a65e4db4b2
|
8cc3498e311d15c9a4394aaa341ef489b482dbe6
|
/test/language/expressions/python/CastUInt8ToUInt64Test.py
|
f87d9e7f32da635b71a147ccb03e8b2400d3e95f
|
[
"BSD-3-Clause"
] |
permissive
|
ndsev/zserio
|
3e55c064f72e86219a6da297f116d3dbb565a9a9
|
c540c4a97fee4e08bfc6669a2cec0d2b8282d8f6
|
refs/heads/master
| 2023-08-24T14:56:10.750155
| 2023-08-11T19:36:54
| 2023-08-11T19:36:54
| 141,550,444
| 113
| 23
|
BSD-3-Clause
| 2023-08-30T11:14:47
| 2018-07-19T08:44:23
|
Java
|
UTF-8
|
Python
| false
| false
| 821
|
py
|
CastUInt8ToUInt64Test.py
|
import unittest
from testutils import getZserioApi
class CastUInt8ToUInt64Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "expressions.zs").cast_uint8_to_uint64
def testUInt64ValueUsingUInt8Value(self):
uint8Value = 0xBA
castUInt8ToUInt64Expression = self.api.CastUInt8ToUInt64Expression(uint8Value, False)
expectedUInt64Value = uint8Value
self.assertEqual(expectedUInt64Value, castUInt8ToUInt64Expression.uint64_value())
def testUint64ValueUsingConstant(self):
uint8Value = 0xBA
castUInt8ToUInt64Expression = self.api.CastUInt8ToUInt64Expression(uint8Value, use_constant_=True)
expectedUInt64Value = 1
self.assertEqual(expectedUInt64Value, castUInt8ToUInt64Expression.uint64_value())
|
41e4c9aa2fe12a99417781b45678e3e2df713732
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/framework/docstr/convolution.py
|
ee0e3d5950a97d064d148ec5b9f7f78f7f8c1fff
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
convolution.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.nn.functional.fold,
r"""
fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1)
The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.nn.functional.fold.html.
Combines an array of sliding local blocks into a large containing tensor.
.. warning::
Currently, only 3-D input tensors (batched image-like tensors) are supported, and only unbatched (3D)
or batched (4D) image-like output tensors are supported.
See :class:`oneflow.nn.Fold` for details.
""",
)
add_docstr(
oneflow.nn.functional.unfold,
r"""
unfold(input, kernel_size, dilation=1, padding=0, stride=1)
The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.nn.functional.unfold.html.
Extracts sliding local blocks from a batched input tensor.
.. warning::
Currently, only 4-D input tensors (batched image-like tensors) are supported.
.. warning::
More than one element of the unfolded tensor may refer to a single
memory location. As a result, in-place operations (especially ones that
are vectorized) may result in incorrect behavior. If you need to write
to the tensor, please clone it first.
See :class:`oneflow.nn.Unfold` for details.
""",
)
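

# A minimal shape sketch for the two functions documented above; the tensor
# sizes are illustrative.
def _usage_sketch():
    x = oneflow.randn(1, 3, 8, 8)
    # unfold: (N, C, H, W) -> (N, C * prod(kernel_size), L) sliding blocks
    patches = oneflow.nn.functional.unfold(x, kernel_size=2, stride=2)    # (1, 12, 16)
    # fold: the inverse for non-overlapping blocks, back to (N, C, H, W)
    restored = oneflow.nn.functional.fold(patches, output_size=(8, 8), kernel_size=2, stride=2)
    return patches.shape, restored.shape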
|
79dc04d474e5a89c3f8aa7f52638c01cda8d87ca
|
51c102c55a94574f6b093afff8b023773b68561b
|
/model.py
|
f981741254e25a7689fac4cb8c5563359414de40
|
[] |
no_license
|
DSKSD/RNN-for-Joint-NLU
|
12b2543df4cfa2ac762c592a5a882edd30a6d199
|
a4c07c8b9e933e57476cd790bef8826c53ef046d
|
refs/heads/master
| 2021-06-13T19:57:14.657354
| 2017-09-18T05:30:52
| 2017-09-18T05:30:52
| 99,791,718
| 215
| 65
| null | 2021-06-05T14:56:46
| 2017-08-09T09:41:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,395
|
py
|
model.py
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
USE_CUDA = torch.cuda.is_available()
class Encoder(nn.Module):
def __init__(self, input_size,embedding_size, hidden_size,batch_size=16 ,n_layers=1):
super(Encoder, self).__init__()
self.input_size = input_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.n_layers = n_layers
self.batch_size=batch_size
self.embedding = nn.Embedding(input_size, embedding_size)
self.lstm = nn.LSTM(embedding_size, hidden_size, n_layers, batch_first=True,bidirectional=True)
def init_weights(self):
self.embedding.weight.data.uniform_(-0.1, 0.1)
#self.lstm.weight.data.
def init_hidden(self,input):
hidden = Variable(torch.zeros(self.n_layers*2, input.size(0), self.hidden_size)).cuda() if USE_CUDA else Variable(torch.zeros(self.n_layers*2, input.size(0), self.hidden_size))
context = Variable(torch.zeros(self.n_layers*2, input.size(0), self.hidden_size)).cuda() if USE_CUDA else Variable(torch.zeros(self.n_layers*2, input.size(0), self.hidden_size))
return (hidden,context)
def forward(self, input,input_masking):
"""
input : B,T (LongTensor)
input_masking : B,T (PAD 마스킹한 ByteTensor)
<PAD> 제외한 리얼 Context를 다시 만들어서 아웃풋으로
"""
self.hidden = self.init_hidden(input)
embedded = self.embedding(input)
output, self.hidden = self.lstm(embedded, self.hidden)
real_context=[]
for i,o in enumerate(output): # B,T,D
            real_length = input_masking[i].data.tolist().count(0) # actual (non-padded) length
real_context.append(o[real_length-1])
return output, torch.cat(real_context).view(input.size(0),-1).unsqueeze(1)
class Decoder(nn.Module):
def __init__(self,slot_size,intent_size,embedding_size,hidden_size,batch_size=16,n_layers=1,dropout_p=0.1):
super(Decoder, self).__init__()
self.hidden_size = hidden_size
self.slot_size = slot_size
self.intent_size = intent_size
self.n_layers = n_layers
self.dropout_p = dropout_p
self.embedding_size = embedding_size
self.batch_size = batch_size
# Define the layers
        self.embedding = nn.Embedding(self.slot_size, self.embedding_size) # TODO: share with the encoder embedding and keep it frozen (not trained)
#self.dropout = nn.Dropout(self.dropout_p)
self.lstm = nn.LSTM(self.embedding_size+self.hidden_size*2, self.hidden_size, self.n_layers, batch_first=True)
self.attn = nn.Linear(self.hidden_size,self.hidden_size) # Attention
self.slot_out = nn.Linear(self.hidden_size*2, self.slot_size)
self.intent_out = nn.Linear(self.hidden_size*2,self.intent_size)
def init_weights(self):
self.embedding.weight.data.uniform_(-0.1, 0.1)
#self.out.bias.data.fill_(0)
#self.out.weight.data.uniform_(-0.1, 0.1)
#self.lstm.weight.data.
def Attention(self, hidden, encoder_outputs, encoder_maskings):
"""
hidden : 1,B,D
encoder_outputs : B,T,D
encoder_maskings : B,T # ByteTensor
"""
        hidden = hidden.squeeze(0).unsqueeze(2) # hidden: (1, batch, dim) -> (batch, dim, 1)
batch_size = encoder_outputs.size(0) # B
max_len = encoder_outputs.size(1) # T
energies = self.attn(encoder_outputs.contiguous().view(batch_size*max_len,-1)) # B*T,D -> B*T,D
        energies = energies.view(batch_size,max_len,-1) # B,T,D (batch, time, dim)
attn_energies = energies.bmm(hidden).transpose(1,2) # B,T,D * B,D,1 --> B,1,T
attn_energies = attn_energies.squeeze(1).masked_fill(encoder_maskings,-1e12) # PAD masking
alpha = F.softmax(attn_energies) # B,T
alpha = alpha.unsqueeze(1) # B,1,T
context = alpha.bmm(encoder_outputs) # B,1,T * B,T,D => B,1,D
return context # B,1,D
def init_hidden(self,input):
        hidden = Variable(torch.zeros(self.n_layers*1, input.size(0), self.hidden_size)).cuda() if USE_CUDA else Variable(torch.zeros(self.n_layers*1, input.size(0), self.hidden_size))
        context = Variable(torch.zeros(self.n_layers*1, input.size(0), self.hidden_size)).cuda() if USE_CUDA else Variable(torch.zeros(self.n_layers*1, input.size(0), self.hidden_size))
return (hidden,context)
def forward(self, input,context,encoder_outputs,encoder_maskings,training=True):
"""
input : B,L(length)
enc_context : B,1,D
"""
# Get the embedding of the current input word
embedded = self.embedding(input)
hidden = self.init_hidden(input)
decode=[]
aligns = encoder_outputs.transpose(0,1)
length = encoder_outputs.size(1)
        for i in range(length): # the input and output sequences have the same length
aligned = aligns[i].unsqueeze(1)# B,1,D
_, hidden = self.lstm(torch.cat((embedded,context,aligned),2), hidden) # input, context, aligned encoder hidden, hidden
# for Intent Detection
if i==0:
intent_hidden = hidden[0].clone()
intent_context = self.Attention(intent_hidden, encoder_outputs,encoder_maskings)
concated = torch.cat((intent_hidden,intent_context.transpose(0,1)),2) # 1,B,D
intent_score = self.intent_out(concated.squeeze(0)) # B,D
concated = torch.cat((hidden[0],context.transpose(0,1)),2)
score = self.slot_out(concated.squeeze(0))
softmaxed = F.log_softmax(score)
decode.append(softmaxed)
_,input = torch.max(softmaxed,1)
embedded = self.embedding(input.unsqueeze(1))
            # Compute the next context vector with attention
context = self.Attention(hidden[0], encoder_outputs,encoder_maskings)
        # Note: concatenate the per-time-step outputs column-wise, then reshape
slot_scores = torch.cat(decode,1)
return slot_scores.view(input.size(0)*length,-1), intent_score
|
cd978201d066c8562dc9b86c0a3daa7fcb38240f
|
2481cde6506743565dff2b405a2396daf208ab3e
|
/src/true_coders/migrations/0006_auto_20180204_1542.py
|
06e1c71c194d8f8641cdef118da3097926f53a35
|
[
"Apache-2.0"
] |
permissive
|
aropan/clist
|
4819a3036d179595e4df8c646aff2ed593b9dad3
|
5c805b2af71acee97f993f19d8d4e229f7f5b411
|
refs/heads/master
| 2023-08-31T11:15:17.987776
| 2023-08-27T21:51:14
| 2023-08-27T21:52:16
| 187,111,853
| 276
| 35
|
Apache-2.0
| 2023-09-06T18:42:53
| 2019-05-16T22:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
0006_auto_20180204_1542.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-04 15:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('true_coders', '0005_auto_20180203_1628'),
]
operations = [
migrations.AlterField(
model_name='coder',
name='organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='true_coders.Organization'),
),
]
|
0aaa74fa4246329a39b800531fbd2fdaae798494
|
6ffd23679939f59f0a09c9507a126ba056b239d7
|
/imperative/python/megengine/functional/quantized.py
|
79f8daa26254394b881452554cdd1a85f79246b6
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
MegEngine/MegEngine
|
74c1c9b6022c858962caf7f27e6f65220739999f
|
66b79160d35b2710c00befede0c3fd729109e474
|
refs/heads/master
| 2023-08-23T20:01:32.476848
| 2023-08-01T07:12:01
| 2023-08-11T06:04:12
| 248,175,118
| 5,697
| 585
|
Apache-2.0
| 2023-07-19T05:11:07
| 2020-03-18T08:21:58
|
C++
|
UTF-8
|
Python
| false
| false
| 7,560
|
py
|
quantized.py
|
# pylint: disable=too-many-lines
from typing import Tuple, Union
from ..core import _config
from ..core._imperative_rt.core2 import apply
from ..core.ops import builtin
from ..tensor import Tensor
from ..utils.tuple_function import _pair, _pair_nonzero
from .debug_param import get_execution_strategy
def conv_bias_activation(
inp: Tensor,
weight: Tensor,
bias: Tensor,
dtype=None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
nonlinear_mode="identity",
conv_mode="cross_correlation",
compute_mode="default",
) -> Tensor:
r"""Convolution bias with activation operation, only for inference.
Args:
inp: feature map of the convolution operation.
weight: convolution kernel.
bias: bias added to the result of convolution
stride: stride of the 2D convolution operation. Default: 1
padding: size of the paddings added to the input on both sides
of its spatial dimensions. Only zero-padding is supported. Default: 0
dilation: dilation of the 2D convolution operation. Default: 1
groups: number of groups into which the input and output channels are divided,
so as to perform a "grouped convolution". When ``groups`` is not 1,
``in_channels`` and ``out_channels`` must be divisible by ``groups``,
and the shape of weight should be `(groups, out_channel // groups,
in_channels // groups, height, width)`.
conv_mode: supports 'cross_correlation' or 'convolution'. Default:
'cross_correlation'
dtype: support for ``np.dtype``, Default: np.int8
compute_mode: when set to "default", no special requirements will be
placed on the precision of intermediate results. When set to "float32",
"float32" would be used for accumulator and intermediate result,
but only effective when input and output are of float16 dtype.
"""
ph, pw = _pair(padding)
sh, sw = _pair_nonzero(stride)
dh, dw = _pair_nonzero(dilation)
sparse_type = "dense" if groups == 1 else "group"
compute_mode = _config._get_actual_op_param(compute_mode, _config.__compute_mode)
op = builtin.ConvBias(
stride_h=sh,
stride_w=sw,
pad_h=ph,
pad_w=pw,
dilate_h=dh,
dilate_w=dw,
dtype=dtype,
strategy=get_execution_strategy(),
nonlineMode=nonlinear_mode,
mode=conv_mode,
compute_mode=compute_mode,
sparse=sparse_type,
)
(outputs,) = apply(op, inp, weight, bias)
return outputs
def batch_conv_bias_activation(
inp: Tensor,
weight: Tensor,
bias: Tensor,
dtype=None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
nonlinear_mode="identity",
conv_mode="cross_correlation",
compute_mode="default",
) -> Tensor:
r"""Batch convolution bias with activation operation, only for inference.
Args:
inp: feature map of the convolution operation.
weight: convolution kernel in batched way.
bias: bias added to the result of convolution
stride: stride of the 2D convolution operation. Default: 1
padding: size of the paddings added to the input on both sides
of its spatial dimensions. Only zero-padding is supported. Default: 0
dilation: dilation of the 2D convolution operation. Default: 1
groups: number of groups into which the input and output channels are divided,
so as to perform a "grouped convolution". When ``groups`` is not 1,
``in_channels`` and ``out_channels`` must be divisible by ``groups``,
and the shape of weight should be `(groups, out_channel // groups,
in_channels // groups, height, width)`.
conv_mode: supports 'cross_correlation' or 'convolution'. Default:
'cross_correlation'
dtype: support for ``np.dtype``, Default: np.int8
compute_mode: when set to "default", no special requirements will be
placed on the precision of intermediate results. When set to "float32",
"float32" would be used for accumulator and intermediate result,
but only effective when input and output are of float16 dtype.
"""
ph, pw = _pair(padding)
sh, sw = _pair_nonzero(stride)
dh, dw = _pair_nonzero(dilation)
sparse_type = "dense" if groups == 1 else "group"
compute_mode = _config._get_actual_op_param(compute_mode, _config.__compute_mode)
op = builtin.BatchConvBias(
stride_h=sh,
stride_w=sw,
pad_h=ph,
pad_w=pw,
dilate_h=dh,
dilate_w=dw,
dtype=dtype,
strategy=get_execution_strategy(),
nonlineMode=nonlinear_mode,
mode=conv_mode,
compute_mode=compute_mode,
sparse=sparse_type,
)
(outputs,) = apply(op, inp, weight, bias)
return outputs
def conv_transpose2d(
inp: Tensor,
weight: Tensor,
bias: Tensor = None,
dtype=None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
output_padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode="cross_correlation",
compute_mode="default",
) -> Tensor:
assert (
conv_mode.lower() == "cross_correlation"
or conv_mode.name == "CROSS_CORRELATION"
)
assert compute_mode.lower() == "default" or compute_mode.name == "DEFAULT"
if groups != 1:
raise NotImplementedError(
"group quantized transposed conv2d is not supported yet."
)
if bias is not None:
raise NotImplementedError(
"bias of quantized transposed conv2d is not supported yet."
)
pad_h, pad_w = _pair(padding)
output_pad_h, output_pad_w = _pair(output_padding)
stride_h, stride_w = _pair_nonzero(stride)
dilate_h, dilate_w = _pair_nonzero(dilation)
compute_mode = _config._get_actual_op_param(compute_mode, _config.__compute_mode)
# should be replaced by Op with bias such as ConvolutionBackwardDataBias
op = builtin.ConvolutionBackwardData(
stride_h=stride_h,
stride_w=stride_w,
pad_h=pad_h,
pad_w=pad_w,
dilate_h=dilate_h,
dilate_w=dilate_w,
strategy=get_execution_strategy(),
dtype=dtype,
compute_mode=compute_mode,
mode=conv_mode,
)
    if output_pad_h != 0 or output_pad_w != 0:
        assert (
            output_pad_h < stride[0]
        ), "output_padding[0] should be less than stride[0]"
        assert (
            output_pad_w < stride[1]
        ), "output_padding[1] should be less than stride[1]"
Hout = (
(inp.shape[2] - 1) * stride[0]
- 2 * padding[0]
+ dilation[0] * (weight.shape[2] - 1)
+ output_pad_h
+ 1
)
Wout = (
(inp.shape[3] - 1) * stride[1]
- 2 * padding[1]
+ dilation[1] * (weight.shape[3] - 1)
+ output_pad_w
+ 1
)
output_shape = [inp.shape[0], weight.shape[1], Hout, Wout]
output_shape = Tensor(output_shape)
(output,) = apply(op, weight, inp, output_shape)
else:
(output,) = apply(op, weight, inp)
return output
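# A quick numeric check of the output-shape formula used above (hypothetical
# values, not taken from the library): with an input spatial size of 4, kernel 3,
# stride 2, padding 1, dilation 1 and output_padding 1,
#     Hout = (4 - 1) * 2 - 2 * 1 + 1 * (3 - 1) + 1 + 1 = 8
# i.e. the usual transposed-convolution relation
#     Hout = (Hin - 1) * stride - 2 * pad + dilation * (kernel - 1) + output_padding + 1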
|
eed321a37cdc78a8065e76823c2fa8dd0b1389af
|
c3e0a6919caf85c35239ef23084df9bbf8dd61c3
|
/pypeit/scripts/compare_sky.py
|
eabc26f2e7a9e693c790e756b3d78b766054fbd9
|
[
"BSD-3-Clause"
] |
permissive
|
pypeit/PypeIt
|
6eb9e5afd62acc9d363e497cd9e367d620f86ea4
|
0d2e2196afc6904050b1af4d572f5c643bb07e38
|
refs/heads/release
| 2023-08-25T21:15:59.113114
| 2023-06-04T15:23:39
| 2023-06-04T15:23:39
| 36,958,428
| 136
| 98
|
BSD-3-Clause
| 2023-09-12T17:42:15
| 2015-06-05T22:25:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,715
|
py
|
compare_sky.py
|
"""
Plots an extracted sky spectrum with an archived one. Probably most useful for
exploring sky spectra in the blue
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
from pypeit.scripts import scriptbase
class CompareSky(scriptbase.ScriptBase):
@classmethod
def get_parser(cls, width=None):
parser = super().get_parser(description='Compare the extracted sky spectrum against an '
'archived sky model maintained by PypeIt.',
width=width)
parser.add_argument('file', type=str, help='spec1d Spectral file')
parser.add_argument('skyfile', type=str,
help='Archived PypeIt sky file (e.g. paranal_sky.fits)')
parser.add_argument('--exten', type=int, help='FITS extension')
parser.add_argument('--optimal', default=False, action='store_true',
help='Show Optimal? Default is boxcar')
parser.add_argument('--scale_user', default=1., type=float,
help='Scale user spectrum by a factor')
parser.add_argument('--test', default=False, action='store_true',
help='Load files but do not show plot')
return parser
# Script to run XSpec from the command line or ipython
@staticmethod
def main(args):
import os
from matplotlib import pyplot as plt
from linetools.spectra.io import readspec
from pypeit import data
# Extension
exten = args.exten if args.exten is not None else 1
# Read spec keywords
ikwargs = {}
if args.optimal:
ikwargs['wave_tag'] = 'OPT_WAVE'
ikwargs['flux_tag'] = 'OPT_COUNTS_SKY'
ikwargs['ivar_tag'] = 'OPT_COUNTS_IVAR'
ikwargs['sig_tag'] = 'OPT_COUNTS_SIG'
else:
ikwargs['wave_tag'] = 'BOX_WAVE'
ikwargs['flux_tag'] = 'BOX_COUNTS_SKY'
ikwargs['ivar_tag'] = 'BOX_COUNTS_IVAR'
ikwargs['sig_tag'] = 'BOX_COUNTS_SIG'
# Load user file
user_sky = readspec(args.file, exten=exten, **ikwargs)
# Load sky spec
arx_sky = data.load_sky_spectrum(args.skyfile)
# Plot
plt.clf()
plt.plot(user_sky.wavelength, user_sky.flux*args.scale_user, 'k-', label='user')
plt.plot(arx_sky.wavelength, arx_sky.flux, 'b-', label='archive')
legend = plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
handletextpad=0.3, fontsize='small', numpoints=1)
if not args.test:
plt.show()
|
60cc110357c878239c2c701a26686f64382c69a9
|
d2621d10d6d0aa4fcecbb11c281e3dd680b985fc
|
/test/pytest/test_onnx.py
|
dd466544eec0c7d2de14640d385ce1c1534524aa
|
[
"Apache-2.0"
] |
permissive
|
pytorch/serve
|
7b562a4d6372e77ce28fc71a5b8d5455c6f02290
|
242895c6b4596c4119ec09d6139e627c5dd696b6
|
refs/heads/master
| 2023-08-31T05:24:10.950144
| 2023-08-31T02:49:22
| 2023-08-31T02:49:22
| 212,488,700
| 3,689
| 895
|
Apache-2.0
| 2023-09-13T22:34:31
| 2019-10-03T03:17:43
|
Java
|
UTF-8
|
Python
| false
| false
| 2,136
|
py
|
test_onnx.py
|
import subprocess
import torch
import torch.onnx
class ToyModel(torch.nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.linear1 = torch.nn.Linear(1, 1)
self.linear2 = torch.nn.Linear(1, 1)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
# For a custom model you still need to author your converter manually; as far as I can tell there isn't a nice out-of-the-box option
def test_convert_to_onnx():
model = ToyModel()
dummy_input = torch.randn(1, 1)
model_path = "linear.onnx"
# set the model to inference mode
model.eval()
# Let's create a dummy input tensor
# Export the model
torch.onnx.export(
model, # model being run
dummy_input, # model input (or a tuple for multiple inputs)
model_path, # where to save the model
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=["modelInput"], # the model's input names
output_names=["modelOutput"], # the model's output names
dynamic_axes={
"modelInput": {0: "batch_size"}, # variable length axes
"modelOutput": {0: "batch_size"},
},
)
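# A minimal sketch of how the exported file could be verified, assuming
# onnxruntime and numpy are installed (illustrative only, not part of the
# original test suite; the input name "modelInput" matches the export above).
def check_exported_model(model_path="linear.onnx"):
    import numpy as np
    import onnxruntime as ort
    session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    dummy = np.random.randn(1, 1).astype(np.float32)
    # Run the graph, feeding the input under the name chosen at export time.
    (output,) = session.run(None, {"modelInput": dummy})
    assert output.shape == (1, 1)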
def test_model_packaging_and_start():
subprocess.run("mkdir model_store", shell=True)
subprocess.run(
"torch-model-archiver -f --model-name onnx --version 1.0 --serialized-file linear.onnx --export-path model_store --handler onnx_handler.py",
shell=True,
check=True,
)
def test_model_start():
subprocess.run(
"torchserve --start --ncs --model-store model_store --models onnx.mar",
shell=True,
check=True,
)
def test_inference():
subprocess.run(
"curl -X POST http://127.0.0.1:8080/predictions/onnx --data-binary '1'",
shell=True,
)
def test_stop():
subprocess.run("torchserve --stop", shell=True, check=True)
|
ef9c4b0f10a3c0a31d942f8c4a3c446528a181c6
|
307d3837d31f9e3728af2b62ca51ebf63fe6ec6b
|
/wlwl1011/BOJ/DP/1932.py
|
f95d7b8f4c5d65fc4efe8cc75b36ee22f717f5e3
|
[] |
no_license
|
ellynhan/challenge100-codingtest-study
|
905043497d154b8a7333ca536e536d013f6e7454
|
bcdc6d04f13b12ba80b42e066f9d244d7c2cc698
|
refs/heads/master
| 2023-09-01T14:10:13.481013
| 2023-08-27T14:38:52
| 2023-08-27T14:38:52
| 401,561,230
| 162
| 176
| null | 2023-09-09T14:56:25
| 2021-08-31T03:30:36
|
C++
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
1932.py
|
import sys, os, io, atexit
input = lambda : sys.stdin.readline().rstrip('\r\n')
stdout = io.BytesIO()
sys.stdout.write = lambda s : stdout.write(s.encode("ascii"))
atexit.register(lambda : os.write(1, stdout.getvalue()))
N = int(input())
arr = []
for i in range(N):
arr.append(list(map(int, input().split())))
# for i in range(N):
# print(arr[i])
for i in range(1,N):
#print('index:',i)
for j in range(len(arr[i])):
#print(len(arr[i]))
if j == 0:
arr[i][j] = arr[i-1][j] + arr[i][j]
elif j == len(arr[i])-1:
arr[i][j] = arr[i-1][j-1] + arr[i][j]
else:
arr[i][j] = max(arr[i-1][j-1],arr[i-1][j]) + arr[i][j]
#print(arr[i])
print(max(arr[N-1]))
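# The same bottom-up recurrence written as a standalone helper (illustrative
# sketch, not part of the submitted solution): each cell keeps the best path sum
# ending there, taking the maximum of its one or two parents in the row above.
def max_triangle_path(tri):
    best = [list(tri[0])]
    for i in range(1, len(tri)):
        row = []
        for j in range(len(tri[i])):
            up_left = best[i - 1][j - 1] if j > 0 else float("-inf")
            up = best[i - 1][j] if j < len(best[i - 1]) else float("-inf")
            row.append(max(up_left, up) + tri[i][j])
        best.append(row)
    return max(best[-1])
# Example: max_triangle_path([[7], [3, 8], [8, 1, 0]]) == 18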
|
4f2eeaefbab2fb07878ee10a4083c3382f16b206
|
fd6e382dd762c1a7c09af7c0112b9d1469f7d7e3
|
/tests/test_metrics.py
|
101aee5f6ad13ec7ba6edca6ead4af8bfe425ff1
|
[
"MIT"
] |
permissive
|
salu133445/pypianoroll
|
304f5121abc1d2661ca74792bdd054f622e3ddc9
|
7bb6dba37da646e13789a083e6ec006a0424730c
|
refs/heads/main
| 2023-07-13T07:28:05.449181
| 2023-06-26T08:16:47
| 2023-06-26T08:16:47
| 117,360,127
| 126
| 21
|
MIT
| 2023-06-26T12:13:12
| 2018-01-13T16:42:39
|
Python
|
UTF-8
|
Python
| false
| false
| 973
|
py
|
test_metrics.py
|
"""Test cases for metrics."""
from math import isnan
import numpy as np
from pytest import fixture
from pypianoroll.metrics import (
empty_beat_rate,
n_pitch_classes_used,
n_pitches_used,
pitch_range,
pitch_range_tuple,
)
@fixture
def pianoroll():
pianoroll = np.zeros((96, 128), np.uint8)
pianoroll[:24, [60, 64, 67, 72]] = 100
pianoroll[73:96, [72, 76, 79, 84]] = 80
return pianoroll
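# How the expected values below follow from this fixture: 96 time steps at a
# resolution of 24 make 4 beats, and only the first and last beats contain
# notes, so 2 of 4 beats are empty (empty_beat_rate 0.5). The active pitches
# are {60, 64, 67, 72, 76, 79, 84}: 7 distinct pitches, 3 pitch classes
# (C, E, G) and a range of 84 - 60 = 24 semitones.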
def test_empty_beat_rate(pianoroll):
assert empty_beat_rate(pianoroll, 24) == 0.5
def test_n_pitches_used(pianoroll):
assert n_pitches_used(pianoroll) == 7
def test_n_pitch_classes_used(pianoroll):
assert n_pitch_classes_used(pianoroll) == 3
def test_pitch_range_tuple(pianoroll):
assert pitch_range_tuple(pianoroll) == (60, 84)
def test_pitch_range(pianoroll):
assert pitch_range(pianoroll) == 24
def test_pitch_range_empty():
pianoroll = np.zeros((96, 128), np.uint8)
assert isnan(pitch_range(pianoroll))
|
ec08dbbdceb74dffe760f5af22f09f3a0b307070
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/tests/tracer/test_single_span_sampling_rules.py
|
f8e37d62ca21704daa799cbe872a0ded710bff1b
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 15,275
|
py
|
test_single_span_sampling_rules.py
|
import sys
import pytest
from ddtrace import Tracer
from ddtrace.constants import SAMPLING_PRIORITY_KEY
from ddtrace.constants import _SINGLE_SPAN_SAMPLING_MAX_PER_SEC
from ddtrace.constants import _SINGLE_SPAN_SAMPLING_MECHANISM
from ddtrace.constants import _SINGLE_SPAN_SAMPLING_RATE
from ddtrace.internal.sampling import SamplingMechanism
from ddtrace.internal.sampling import SpanSamplingRule
from ddtrace.internal.sampling import _get_file_json
from ddtrace.internal.sampling import get_span_sampling_rules
from tests.utils import DummyTracer
from tests.utils import DummyWriter
from ..utils import override_global_config
def traced_function(rule, tracer=None, name="test_name", service="test_service", trace_sampling=False):
if not tracer:
tracer = DummyTracer()
with tracer.trace(name) as span:
span.service = service
# If the trace sampler samples the trace, then we shouldn't add the span sampling tags
# because trace sampling takes precedence over single-span sampling
if trace_sampling:
span.context.sampling_priority = 1
else:
span.context.sampling_priority = 0
if rule.match(span):
rule.sample(span)
return span
def assert_sampling_decision_tags(
span, sample_rate=1.0, mechanism=SamplingMechanism.SPAN_SAMPLING_RULE, limit=None, trace_sampling=False
):
assert span.get_metric(_SINGLE_SPAN_SAMPLING_RATE) == sample_rate
assert span.get_metric(_SINGLE_SPAN_SAMPLING_MECHANISM) == mechanism
assert span.get_metric(_SINGLE_SPAN_SAMPLING_MAX_PER_SEC) == limit
if trace_sampling:
assert span.get_metric(SAMPLING_PRIORITY_KEY) > 0
def test_single_rule_init_via_env():
with override_global_config(
dict(_sampling_rules='[{"sample_rate":0.5,"service":"xyz","name":"abc","max_per_second":100}]')
):
sampling_rules = get_span_sampling_rules()
assert sampling_rules[0]._sample_rate == 0.5
assert sampling_rules[0]._service_matcher.pattern == "xyz"
assert sampling_rules[0]._name_matcher.pattern == "abc"
assert sampling_rules[0]._max_per_second == 100
assert len(sampling_rules) == 1
def test_multiple_rules_init_via_env():
with override_global_config(
dict(
_sampling_rules='[{"service":"xy?","name":"a*c"}, \
{"sample_rate":0.5,"service":"my-service","name":"my-name", "max_per_second":20}]'
)
):
sampling_rules = get_span_sampling_rules()
assert sampling_rules[0]._sample_rate == 1.0
assert sampling_rules[0]._service_matcher.pattern == "xy?"
assert sampling_rules[0]._name_matcher.pattern == "a*c"
assert sampling_rules[0]._max_per_second == -1
assert sampling_rules[1]._sample_rate == 0.5
assert sampling_rules[1]._service_matcher.pattern == "my-service"
assert sampling_rules[1]._name_matcher.pattern == "my-name"
assert sampling_rules[1]._max_per_second == 20
assert len(sampling_rules) == 2
def test_rule_init_via_env_no_name():
with override_global_config(dict(_sampling_rules='[{"service":"xyz", "sample_rate":0.23}]')):
sampling_rules = get_span_sampling_rules()
assert sampling_rules[0]._sample_rate == 0.23
assert sampling_rules[0]._service_matcher.pattern == "xyz"
assert sampling_rules[0]._max_per_second == -1
assert len(sampling_rules) == 1
def test_rule_init_via_env_only_name():
with override_global_config(dict(_sampling_rules='[{"name":"xyz"}]')):
sampling_rules = get_span_sampling_rules()
assert sampling_rules[0]._sample_rate == 1.0
assert sampling_rules[0]._name_matcher.pattern == "xyz"
assert sampling_rules[0]._max_per_second == -1
assert len(sampling_rules) == 1
def test_rule_init_via_env_no_name_or_service():
with override_global_config(dict(_sampling_rules='[{"sample_rate":1.0}]')):
with pytest.raises(ValueError):
get_span_sampling_rules()
def test_rule_init_via_env_service_pattern_contains_unsupported_char():
with override_global_config(dict(_sampling_rules='[{"service":"h[!a]i"}]')):
with pytest.raises(ValueError):
get_span_sampling_rules()
def test_rule_init_via_env_name_pattern_contains_unsupported_char():
with override_global_config(dict(_sampling_rules='[{"name":"h[!a]i"}]')):
with pytest.raises(ValueError):
get_span_sampling_rules()
def test_rule_init_via_env_json_not_list():
with override_global_config(
dict(_sampling_rules='{"sample_rate":0.5,"service":"xyz","name":"abc","max_per_second":100}')
):
with pytest.raises(TypeError):
get_span_sampling_rules()
def test_rule_init_via_env_json_not_valid():
with override_global_config(
dict(_sampling_rules='{"sample_rate":0.5,"service":"xyz","name":"abc","max_per_second":100')
):
with pytest.raises(ValueError):
get_span_sampling_rules()
def test_env_rules_cause_matching_span_to_be_sampled():
"""Test that single span sampling tags are applied to spans that should get sampled when envars set"""
with override_global_config(dict(_sampling_rules='[{"service":"test_service","name":"test_name"}]')):
sampling_rules = get_span_sampling_rules()
assert sampling_rules[0]._service_matcher.pattern == "test_service"
assert sampling_rules[0]._name_matcher.pattern == "test_name"
tracer = Tracer()
tracer.configure(writer=DummyWriter())
span = traced_function(sampling_rules[0], tracer=tracer)
assert_sampling_decision_tags(span)
def test_env_rules_dont_cause_non_matching_span_to_be_sampled():
"""Test that single span sampling tags are not applied to spans that do not match rules"""
with override_global_config(dict(_sampling_rules='[{"service":"test_ser","name":"test_na"}]')):
sampling_rules = get_span_sampling_rules()
assert sampling_rules[0]._service_matcher.pattern == "test_ser"
assert sampling_rules[0]._name_matcher.pattern == "test_na"
tracer = Tracer()
tracer.configure(writer=DummyWriter())
span = traced_function(sampling_rules[0], tracer=tracer)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rules_not_applied_when_span_sampled_by_trace_sampling():
"""Test that single span sampling rules aren't applied if a span is already going to be sampled by trace sampler"""
with override_global_config(dict(_sampling_rules='[{"service":"test_service","name":"test_name"}]')):
sampling_rules = get_span_sampling_rules()
assert sampling_rules[0]._service_matcher.pattern == "test_service"
assert sampling_rules[0]._name_matcher.pattern == "test_name"
tracer = Tracer()
tracer.configure(writer=DummyWriter())
span = traced_function(sampling_rules[0], tracer=tracer, trace_sampling=True)
assert sampling_rules[0].match(span) is True
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None, trace_sampling=True)
def test_sampling_rule_init_config_multiple_sampling_rule_json_via_file(tmpdir):
file = tmpdir.join("rules.json")
file.write(
'[{"service":"xy?","name":"a*c"}, \
{"sample_rate":0.5,"service":"my-service","name":"my-name", "max_per_second":"20"}]'
)
with override_global_config(dict(_sampling_rules_file=str(file))):
sampling_rules = _get_file_json()
assert sampling_rules == [
{"service": "xy?", "name": "a*c"},
{"sample_rate": 0.5, "service": "my-service", "name": "my-name", "max_per_second": "20"},
]
with override_global_config(dict(_sampling_rules_file="data/this_doesnt_exist.json")):
exception = FileNotFoundError if sys.version_info.major > 3 else IOError
with pytest.raises(exception):
get_span_sampling_rules()
def test_env_config_takes_precedence_over_file_config(tmpdir, caplog):
file = tmpdir.join("rules.json")
file.write('[{"sample_rate":1.0,"service":"x","name":"ab","max_per_second":1000}]')
with override_global_config(
dict(
_sampling_rules='[{"sample_rate":0.5,"service":"xyz","name":"abc","max_per_second":100}]',
_sampling_rules_file=str(file),
)
):
sampling_rules = get_span_sampling_rules()
assert caplog.record_tuples == [
(
"ddtrace.internal.sampling",
30,
"DD_SPAN_SAMPLING_RULES and DD_SPAN_SAMPLING_RULES_FILE detected. "
"Defaulting to DD_SPAN_SAMPLING_RULES value.",
)
]
assert sampling_rules[0]._sample_rate == 0.5
assert sampling_rules[0]._service_matcher.pattern == "xyz"
assert sampling_rules[0]._name_matcher.pattern == "abc"
assert sampling_rules[0]._max_per_second == 100
assert len(sampling_rules) == 1
def test_single_span_rule_no_match_empty_strings():
rule = SpanSamplingRule(service="", name="", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_no_match_service():
rule = SpanSamplingRule(service="wrong_service_name", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_no_match_name():
rule = SpanSamplingRule(service="test_service", name="wrong_operation_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_no_match_only_service():
rule = SpanSamplingRule(service="wrong_service_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_no_match_no_span_name_or_service():
rule = SpanSamplingRule(service="test_service", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule, name=None, service=None)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_only_span_service():
rule = SpanSamplingRule(service="test_service", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule, name=None)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_only_span_name():
rule = SpanSamplingRule(service="test_service", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule, service=None)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_no_match_only_name():
rule = SpanSamplingRule(name="wrong_operation_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_match():
rule = SpanSamplingRule(service="test_service", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span)
def test_single_span_rule_match_only_service():
rule = SpanSamplingRule(service="test_service", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span)
def test_single_span_rule_match_only_name():
rule = SpanSamplingRule(name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span)
# NB more extensive testing of the matching is done in test_glob_matcher.py
def test_single_span_rule_match_wildcards():
rule = SpanSamplingRule(service="test_*", name="test?????", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span)
def test_single_span_rule_no_match_wildcards():
rule = SpanSamplingRule(service="*test_", name="test_nam??", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_unsupported_pattern_bracket_expansion():
rule = SpanSamplingRule(service="test_servic[a-z]+", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_unsupported_pattern_escape_character():
rule = SpanSamplingRule(service="test_servic[?]", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_single_span_rule_unsupported_pattern_bracket_expansion_literal_evaluation():
rule = SpanSamplingRule(service="test_servic[a-z]+", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule, service="test_servic[a-z]+")
assert_sampling_decision_tags(span)
def test_single_span_rule_unsupported_pattern_escape_character_literal_evaluation():
rule = SpanSamplingRule(service="test_servic[?]", name="test_name", sample_rate=1.0, max_per_second=-1)
span = traced_function(rule, service="test_servic[?]")
assert_sampling_decision_tags(span)
def test_multiple_span_rule_match():
rule = SpanSamplingRule(service="test_service", name="test_name", sample_rate=1.0, max_per_second=-1)
for _ in range(10):
span = traced_function(rule)
assert_sampling_decision_tags(span)
def test_single_span_rules_not_applied_if_span_dropped_by_single_span_rate_limiter():
rule = SpanSamplingRule(service="test_service", name="test_name", sample_rate=1.0, max_per_second=0)
for _ in range(10):
span = traced_function(rule)
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
def test_max_per_sec_with_is_allowed_check():
rule = SpanSamplingRule(service="test_service", name="test_name", sample_rate=1.0, max_per_second=2)
# Make spans till we hit the limit, then make a span while the limit is hit and make sure tags were not added.
while True:
span = traced_function(rule)
if not rule._limiter._is_allowed(span.start_ns):
break
assert_sampling_decision_tags(span, limit=2)
rate_limited_span = traced_function(rule)
assert_sampling_decision_tags(rate_limited_span, sample_rate=None, mechanism=None, limit=None)
def test_max_per_sec_with_predetermined_number_of_spans():
rule = SpanSamplingRule(service="test_service", name="test_name", sample_rate=1.0, max_per_second=2)
for i in range(3):
span = traced_function(rule)
if i < 2:
assert_sampling_decision_tags(span, limit=2)
else:
assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)
|
dd7a24b40392a3e6aa4d64ed75d48b3dad0ad0b2
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/test/exceptions/test_tensor_index.py
|
0b7ab40f938b38f7eed961b3c7611e3b12e1e081
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,563
|
py
|
test_tensor_index.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow.unittest
import oneflow as flow
class TestTensorIndexError(flow.unittest.TestCase):
def test_PrepareSliceIndices_indices_amount_index_error(test_case):
with test_case.assertRaises(IndexError) as context:
x = flow.arange(16).reshape(4, 4)
x[0, 0, 0] = 0
test_case.assertTrue(
"Too many indices for tensor of dimension" in str(context.exception)
)
def test_PrepareSliceIndices_slice_step_runtime_error(test_case):
with test_case.assertRaises(RuntimeError) as context:
x = flow.tensor([0, 1, 2, 3], dtype=flow.int32)
s = slice(0, 2, -1)
y = x[s]
test_case.assertTrue("Step must be greater than zero" in str(context.exception))
def test_ApplySelectIndexing_input_dim_runtime_error(test_case):
with test_case.assertRaises(RuntimeError) as context:
x = flow.tensor(5, dtype=flow.int32)
y = x[0]
test_case.assertTrue(
"select() cannot be applied to a 0-dim tensor." in str(context.exception)
)
def test_ApplySelectIndexing_index_error(test_case):
with test_case.assertRaises(IndexError) as context:
x = flow.ones(2, 3, dtype=flow.int32)
y = x[3]
test_case.assertTrue(
"Index out of range (expected to be in range of" in str(context.exception)
)
def test_ApplyAdvancedIndexing_index_error(test_case):
with test_case.assertRaises(IndexError) as context:
x = flow.ones(2, 2, dtype=flow.int32)
index = (
flow.tensor(1, dtype=flow.int32),
flow.tensor(1, dtype=flow.int32),
flow.tensor(1, dtype=flow.int32),
)
y = x[index]
test_case.assertTrue(
"Too many indices for tensor of dimension" in str(context.exception)
)
if __name__ == "__main__":
unittest.main()
|
fc139e5758a9e750d0080da87a57f080c535a521
|
160f08e768d7271f9522ad2597ac4ee79c04477a
|
/src/c3nav/mapdata/render/geometry/altitudearea.py
|
f7ffc65a73da4dfbcb0939bcbd5b54e6b9cf2382
|
[
"Apache-2.0"
] |
permissive
|
c3nav/c3nav
|
6254724dfc8589ee03c6028577befd7c65b05857
|
1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7
|
refs/heads/main
| 2023-08-04T08:36:18.431458
| 2023-07-24T09:57:18
| 2023-07-24T09:57:18
| 56,852,994
| 140
| 47
|
Apache-2.0
| 2023-07-05T22:55:27
| 2016-04-22T12:13:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,722
|
py
|
altitudearea.py
|
from collections import deque
from itertools import chain
import numpy as np
from c3nav.mapdata.models import AltitudeArea
from c3nav.mapdata.render.geometry.hybrid import HybridGeometry
class AltitudeAreaGeometries:
def __init__(self, altitudearea=None, colors=None, obstacles=None):
if altitudearea is not None:
self.geometry = altitudearea.geometry
self.altitude = int(altitudearea.altitude * 1000)
self.altitude2 = None if altitudearea.altitude2 is None else int(altitudearea.altitude2 * 1000)
self.point1 = altitudearea.point1
self.point2 = altitudearea.point2
else:
self.geometry = None
self.altitude = None
self.altitude2 = None
self.point1 = None
self.point2 = None
self.base = None
self.bottom = None
self.colors = colors
self.obstacles = obstacles
def get_altitudes(self, points):
# noinspection PyCallByClass,PyTypeChecker
return AltitudeArea.get_altitudes(self, points/1000).astype(np.int32)
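    # Altitudes are kept as scaled integers in this class (__init__ multiplies by
    # 1000 and truncates), so the incoming points are divided by 1000 before
    # delegating to AltitudeArea.get_altitudes and the result is cast back to int32.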
def create_hybrid_geometries(self, face_centers, vertices_offset, faces_offset):
self.geometry = HybridGeometry.create(self.geometry, face_centers)
vertices = deque()
faces = deque()
for color, areas in self.colors.items():
for height in tuple(areas.keys()):
faces_offset, vertices_offset = self._call_create_full(areas, height, faces, vertices,
faces_offset, vertices_offset)
for height_obstacles in self.obstacles.values():
for color_obstacles in height_obstacles.values():
for i in range(len(color_obstacles)):
faces_offset, vertices_offset = self._call_create_full(color_obstacles, i, faces, vertices,
faces_offset, vertices_offset)
if not vertices:
return np.empty((0, 2), dtype=np.int32), np.empty((0, 3), dtype=np.uint32)
return np.vstack(vertices), np.vstack(faces)
def _call_create_full(self, mapping, key, faces, vertices, faces_offset, vertices_offset):
geom = mapping[key]
new_geom, new_vertices, new_faces = HybridGeometry.create_full(geom, vertices_offset, faces_offset)
mapping[key] = new_geom
vertices_offset += new_vertices.shape[0]
faces_offset += new_faces.shape[0]
vertices.append(new_vertices)
faces.append(new_faces)
return faces_offset, vertices_offset
def remove_faces(self, faces):
self.geometry.remove_faces(faces)
for areas in self.colors.values():
for area in areas.values():
area.remove_faces(faces)
def create_polyhedrons(self, create_polyhedron, altitudes, min_altitude, crops):
if self.altitude2 is None:
altitudes = self.altitude
self.base = HybridGeometry(self.geometry.geom, self.geometry.faces)
self.bottom = HybridGeometry(self.geometry.geom, self.geometry.faces)
self.geometry.build_polyhedron(create_polyhedron,
lower=altitudes - int(0.7 * 1000),
upper=altitudes,
crops=crops)
self.base.build_polyhedron(create_polyhedron,
lower=min_altitude - int(0.7 * 1000),
upper=altitudes - int(0.7 * 1000),
crops=crops,
top=False, bottom=False)
self.bottom.build_polyhedron(create_polyhedron,
lower=0, upper=1,
crops=crops,
top=False)
for geometry in chain(*(areas.values() for areas in self.colors.values())):
geometry.build_polyhedron(create_polyhedron,
lower=altitudes,
upper=altitudes + int(0.001 * 1000),
crops=crops)
# todo: treat altitude properly
for height, height_geometries in self.obstacles.items():
for color, color_geometries in height_geometries.items():
for geometry in color_geometries:
geometry.build_polyhedron(create_polyhedron,
lower=altitudes,
upper=altitudes + height,
crops=crops)
|
78e8b4d8970e00cd492a094bd691a55dc0f17058
|
2617bfec230858814b32795c6a47249c54a15cac
|
/cupy_alias/creation/__init__.py
|
5ba6c2b08f0b4fc37989c1b9581142e0b4e9f4c2
|
[
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fixstars/clpy
|
a06a1281887470d8faee3ec204b56fbef2496fab
|
693485f85397cc110fa45803c36c30c24c297df0
|
refs/heads/clpy
| 2021-06-10T04:00:30.974447
| 2021-02-28T06:01:26
| 2021-02-28T06:01:26
| 136,439,592
| 154
| 20
|
NOASSERTION
| 2021-04-07T02:41:03
| 2018-06-07T07:33:04
|
Python
|
UTF-8
|
Python
| false
| false
| 36
|
py
|
__init__.py
|
from clpy.creation import * # NOQA
|
76ba6e7f7e9d5af60559ce31b0ed02848a83d206
|
a223de4de3e21f30771d6a67db61b99c0f3b4685
|
/.github/scripts/util.py
|
b4dc8d79803b51cead0e2f286ddfc20bf4bbff6a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
juicedata/juicefs-csi-driver
|
de40eea4cadcec7a2b6e4c483fba2ed935bbfafe
|
4dedd3f04502d2d92b237d0d11aa1a4dfb181571
|
refs/heads/master
| 2023-09-02T23:18:42.777570
| 2023-09-01T06:46:01
| 2023-09-01T06:46:01
| 196,400,330
| 158
| 67
|
Apache-2.0
| 2023-09-12T06:19:11
| 2019-07-11T13:28:51
|
Go
|
UTF-8
|
Python
| false
| false
| 11,012
|
py
|
util.py
|
# Copyright 2022 Juicedata Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
import random
import re
import string
import subprocess
import time
from pathlib import Path
from kubernetes import client
from config import KUBE_SYSTEM, LOG, IS_CE, SECRET_NAME, GLOBAL_MOUNTPOINT, SECRET_KEY, ACCESS_KEY, META_URL, \
BUCKET, TOKEN, STORAGECLASS_NAME
from model import Pod, Secret, STORAGE, StorageClass, PODS, DEPLOYMENTs, PVCs, PVs, SECRETs, STORAGECLASSs
def check_do_test():
if IS_CE:
return True
if TOKEN == "":
return False
return True
def die(e):
csi_node_name = os.getenv("JUICEFS_CSI_NODE_POD")
if csi_node_name is not None:
po = Pod(name=csi_node_name, deployment_name="", replicas=1, namespace=KUBE_SYSTEM)
LOG.info("Get csi node log:")
LOG.info(po.get_log("juicefs-plugin"))
LOG.info("Get csi controller log:")
controller_po = Pod(name="juicefs-csi-controller-0", deployment_name="", replicas=1, namespace=KUBE_SYSTEM)
LOG.info(controller_po.get_log("juicefs-plugin"))
LOG.info("Get event: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "event", "--all-namespaces"], check=True)
LOG.info("Get pvc: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "pvc", "--all-namespaces"], check=True)
LOG.info("Get pv: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "pv"], check=True)
LOG.info("Get sc: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "sc"], check=True)
LOG.info("Get job: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "job", "--all-namespaces"], check=True)
raise Exception(e)
def mount_on_host(mount_path):
LOG.info(f"Mount {mount_path}")
try:
if IS_CE:
subprocess.check_call(
["sudo", "/usr/local/bin/juicefs", "format", f"--storage={STORAGE}", f"--access-key={ACCESS_KEY}",
f"--secret-key={SECRET_KEY}", f"--bucket={BUCKET}", META_URL, SECRET_NAME])
subprocess.check_call(["sudo", "/usr/local/bin/juicefs", "mount", "-d", META_URL, mount_path])
else:
subprocess.check_call(
["sudo", "/usr/bin/juicefs", "auth", f"--token={TOKEN}", f"--accesskey={ACCESS_KEY}",
f"--secretkey={SECRET_KEY}", f"--bucket={BUCKET}", SECRET_NAME])
subprocess.check_call(["sudo", "/usr/bin/juicefs", "mount", "-d", SECRET_NAME, mount_path])
LOG.info("Mount success.")
except Exception as e:
LOG.info("Error in juicefs mount: {}".format(e))
raise e
def umount(mount_path):
subprocess.check_call(["sudo", "umount", mount_path, "-l"])
def check_mount_point(check_path):
check_path = GLOBAL_MOUNTPOINT + "/" + check_path
for i in range(0, 60):
try:
LOG.info("Open file {}".format(check_path))
with open(check_path) as f:
content = f.read(1)
if content is not None and content != "":
return True
time.sleep(5)
except FileNotFoundError:
LOG.info(os.listdir(GLOBAL_MOUNTPOINT))
LOG.info("Can't find file: {}".format(check_path))
time.sleep(5)
continue
except Exception as e:
LOG.info(e)
log = open("/var/log/juicefs.log", "rt")
LOG.info(log.read())
raise e
return False
def check_quota(name, expected):
output = ""
for i in range(0, 10):
process = subprocess.run([
"kubectl", "exec", name, "-c", "app", "-n", "default", "-t", "--", "df", "-h"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if process.returncode is not None and process.returncode != 0:
raise Exception("df -h failed: {}".format(process.stderr))
output = process.stdout
quota = None
for line in process.stdout.split("\n"):
if line.startswith("JuiceFS:"):
items = line.split()
if len(items) >= 2:
quota = items[1]
if quota is None:
raise Exception("df -h result does not contain juicefs info:\n{}".format(process.stdout))
if quota != expected:
time.sleep(1)
continue
LOG.info("df -h result: {}".format(process.stdout))
return
raise Exception("quota is not set:\n{}".format(output))
def wait_dir_empty(check_path):
LOG.info(f"check path {check_path} empty")
for i in range(0, 60):
output = subprocess.run(["sudo", "ls", check_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if output.stderr.decode("utf-8") != "":
LOG.info("output stderr {}".format(output.stderr.decode("utf-8")))
return True
if output.stdout.decode("utf-8") == "":
return True
time.sleep(5)
return False
def wait_dir_not_empty(check_path):
LOG.info(f"check path {check_path} not empty")
for i in range(0, 60):
output = subprocess.run(["sudo", "ls", check_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if output.stderr.decode("utf-8") != "":
LOG.info("output stderr {}".format(output.stderr.decode("utf-8")))
continue
if output.stdout.decode("utf-8") != "":
return True
time.sleep(5)
return False
def get_only_mount_pod_name(volume_id):
pods = client.CoreV1Api().list_namespaced_pod(
namespace=KUBE_SYSTEM,
label_selector="volume-id={}".format(volume_id)
)
if len(pods.items) == 0:
raise Exception("Can't get mount pod of volume id {}".format(volume_id))
if len(pods.items) > 1:
raise Exception("Get more than one mount pod of volume id {}".format(volume_id))
return pods.items[0].metadata.name
def get_mount_pods(volume_id):
pods = client.CoreV1Api().list_namespaced_pod(
namespace=KUBE_SYSTEM,
label_selector="volume-id={}".format(volume_id)
)
return pods
def get_voldel_job(volume_id):
hash_object = hashlib.sha256(volume_id.encode('utf-8'))
hash_value = hash_object.hexdigest()[:16]
juicefs_hash = f"juicefs-{hash_value}"[:16]
for i in range(0, 300):
try:
job = client.BatchV1Api().read_namespaced_job(
namespace=KUBE_SYSTEM,
name=f"{juicefs_hash}-delvol"
)
return job
except client.exceptions.ApiException as e:
if e.status == 404:
time.sleep(0.5)
continue
raise e
def check_pod_ready(pod):
if pod.status.phase.lower() != "running":
LOG.info("Pod {} status phase: {}".format(pod.metadata.name, pod.status.phase))
return False
conditions = pod.status.conditions
for c in conditions:
if c.status != "True":
return False
return True
def check_mount_pod_refs(pod_name, replicas):
pod = client.CoreV1Api().read_namespaced_pod(name=pod_name, namespace=KUBE_SYSTEM)
annotations = pod.metadata.annotations
if annotations is None:
if replicas == 0:
return True
else:
return False
num = 0
for k, v in annotations.items():
if k.startswith("juicefs-") and "/var/lib/kubelet/pods" in v:
num += 1
return num == replicas
def deploy_secret_and_sc():
LOG.info("Deploy secret & storageClass..")
secret = Secret(secret_name=SECRET_NAME)
secret.create()
LOG.info("Deploy secret {}".format(secret.secret_name))
sc = StorageClass(name=STORAGECLASS_NAME, secret_name=secret.secret_name)
sc.create()
LOG.info("Deploy storageClass {}".format(sc.name))
def tear_down():
LOG.info("Tear down all resources begin..")
try:
for po in PODS:
LOG.info("Delete pod {}".format(po.name))
po.delete()
LOG.info("Watch for pods {} for delete.".format(po.name))
result = po.watch_for_delete(1)
if not result:
raise Exception("Pods {} are not delete within 5 min.".format(po.name))
for deploy in DEPLOYMENTs:
LOG.info("Delete deployment {}".format(deploy.name))
deploy = deploy.refresh()
deploy.delete()
pod = Pod(name="", deployment_name=deploy.name, replicas=deploy.replicas)
LOG.info("Watch for pods of deployment {} for delete.".format(deploy.name))
result = pod.watch_for_delete(deploy.replicas)
if not result:
raise Exception("Pods of deployment {} are not delete within 5 min.".format(deploy.name))
for pvc in PVCs:
LOG.info("Delete pvc {}".format(pvc.name))
pvc.delete()
for sc in STORAGECLASSs:
LOG.info("Delete storageclass {}".format(sc.name))
sc.delete()
for pv in PVs:
LOG.info("Delete pv {}".format(pv.name))
pv.delete()
for secret in SECRETs:
LOG.info("Delete secret {}".format(secret.secret_name))
secret.delete()
LOG.info("Delete all volumes in file system.")
clean_juicefs_volume()
except Exception as e:
LOG.info("Error in tear down: {}".format(e))
LOG.info("Tear down success.")
def clean_juicefs_volume():
visible_files = [file for file in Path(GLOBAL_MOUNTPOINT).iterdir() if not file.name.startswith(".")]
if len(visible_files) != 0:
if IS_CE:
subprocess.check_call(["/usr/local/bin/juicefs rmr " + GLOBAL_MOUNTPOINT + "/*"], shell=True)
else:
            # only delete files older than 3 days
for file in visible_files:
try:
f_time = file.stat().st_ctime
now = time.time()
if now - f_time > 3600 * 24 * 3:
subprocess.run(["/usr/bin/juicefs", "rmr", str(file)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except FileNotFoundError:
continue
def gen_random_string(slen=10):
return ''.join(random.sample(string.ascii_letters + string.digits, slen))
def get_vol_uuid(name):
output = subprocess.run(
["sudo", "/usr/local/bin/juicefs", "status", name], stdout=subprocess.PIPE)
out = output.stdout.decode("utf-8")
return re.search("\"UUID\": \"(.*)\"", out).group(1)
|
0f52d7163160a50cc8e57f8e6ab24b8c4f7be287
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/pkg/tests/integration/test_salt_key.py
|
87275a677fa1a7ea31f6e03e5d4f939d7fb0937b
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 260
|
py
|
test_salt_key.py
|
import pytest
pytestmark = [
pytest.mark.skip_on_windows,
]
def test_salt_key(salt_key_cli, salt_minion):
"""
Test running salt-key -L
"""
ret = salt_key_cli.run("-L")
assert ret.data
assert salt_minion.id in ret.data["minions"]
|
f40333e5b3c6d82b93ce42f80449eb34d4f281d6
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/networkx/generators/tests/test_random_graphs.py
|
4263cebc8e565dcaa818cf2ec267807c79b76488
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 10,602
|
py
|
test_random_graphs.py
|
# -*- encoding: utf-8 -*-
# test_random_graphs.py - unit tests for random graph generators
#
# Copyright 2010-2019 NetworkX developers.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""Unit tests for the :mod:`networkx.generators.random_graphs` module.
"""
from nose.tools import assert_almost_equal
from nose.tools import assert_greater
from nose.tools import assert_less
from nose.tools import assert_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from networkx.exception import NetworkXError
from networkx.generators.random_graphs import barabasi_albert_graph
from networkx.generators.random_graphs import dual_barabasi_albert_graph
from networkx.generators.random_graphs import extended_barabasi_albert_graph
from networkx.generators.random_graphs import binomial_graph
from networkx.generators.random_graphs import connected_watts_strogatz_graph
from networkx.generators.random_graphs import dense_gnm_random_graph
from networkx.generators.random_graphs import erdos_renyi_graph
from networkx.generators.random_graphs import fast_gnp_random_graph
from networkx.generators.random_graphs import gnm_random_graph
from networkx.generators.random_graphs import gnp_random_graph
from networkx.generators.random_graphs import newman_watts_strogatz_graph
from networkx.generators.random_graphs import powerlaw_cluster_graph
from networkx.generators.random_graphs import random_kernel_graph
from networkx.generators.random_graphs import random_lobster
from networkx.generators.random_graphs import random_powerlaw_tree
from networkx.generators.random_graphs import random_powerlaw_tree_sequence
from networkx.generators.random_graphs import random_regular_graph
from networkx.generators.random_graphs import random_shell_graph
from networkx.generators.random_graphs import watts_strogatz_graph
class TestGeneratorsRandom(object):
def smoke_test_random_graph(self):
seed = 42
G = gnp_random_graph(100, 0.25, seed)
G = gnp_random_graph(100, 0.25, seed, directed=True)
G = binomial_graph(100, 0.25, seed)
G = erdos_renyi_graph(100, 0.25, seed)
G = fast_gnp_random_graph(100, 0.25, seed)
G = fast_gnp_random_graph(100, 0.25, seed, directed=True)
G = gnm_random_graph(100, 20, seed)
G = gnm_random_graph(100, 20, seed, directed=True)
G = dense_gnm_random_graph(100, 20, seed)
G = watts_strogatz_graph(10, 2, 0.25, seed)
assert_equal(len(G), 10)
assert_equal(G.number_of_edges(), 10)
G = connected_watts_strogatz_graph(10, 2, 0.1, tries=10, seed=seed)
assert_equal(len(G), 10)
assert_equal(G.number_of_edges(), 10)
assert_raises(NetworkXError, connected_watts_strogatz_graph, \
10, 2, 0.1, tries=0)
G = watts_strogatz_graph(10, 4, 0.25, seed)
assert_equal(len(G), 10)
assert_equal(G.number_of_edges(), 20)
G = newman_watts_strogatz_graph(10, 2, 0.0, seed)
assert_equal(len(G), 10)
assert_equal(G.number_of_edges(), 10)
G = newman_watts_strogatz_graph(10, 4, 0.25, seed)
assert_equal(len(G), 10)
assert_true(G.number_of_edges() >= 20)
G = barabasi_albert_graph(100, 1, seed)
G = barabasi_albert_graph(100, 3, seed)
assert_equal(G.number_of_edges(), (97 * 3))
G = extended_barabasi_albert_graph(100, 1, 0, 0, seed)
assert_equal(G.number_of_edges(), 99)
G = extended_barabasi_albert_graph(100, 3, 0, 0, seed)
assert_equal(G.number_of_edges(), 97 * 3)
G = extended_barabasi_albert_graph(100, 1, 0, 0.5, seed)
assert_equal(G.number_of_edges(), 99)
G = extended_barabasi_albert_graph(100, 2, 0.5, 0, seed)
assert_greater(G.number_of_edges(), 100 * 3)
assert_less(G.number_of_edges(), 100 * 4)
G = extended_barabasi_albert_graph(100, 2, 0.3, 0.3, seed)
assert_greater(G.number_of_edges(), 100 * 2)
assert_less(G.number_of_edges(), 100 * 4)
G = powerlaw_cluster_graph(100, 1, 1.0, seed)
G = powerlaw_cluster_graph(100, 3, 0.0, seed)
assert_equal(G.number_of_edges(), (97 * 3))
G = random_regular_graph(10, 20, seed)
assert_raises(NetworkXError, random_regular_graph, 3, 21)
assert_raises(NetworkXError, random_regular_graph, 33, 21)
constructor = [(10, 20, 0.8), (20, 40, 0.8)]
G = random_shell_graph(constructor, seed)
G = random_lobster(10, 0.1, 0.5, seed)
# difficult to find seed that requires few tries
seq = random_powerlaw_tree_sequence(10, 3, seed=14, tries=1)
G = random_powerlaw_tree(10, 3, seed=14, tries=1)
def test_dual_barabasi_albert(self, m1=1, m2=4, p=0.5):
"""
        Tests that the generated dual BA random graph behaves consistently.
        Tests that the exceptions are raised as expected.
        The graph generation is repeated several times to avoid lucky shots.
        """
seed = 42
repeats = 2
while repeats:
repeats -= 1
# This should be BA with m = m1
BA1 = barabasi_albert_graph(100, m1, seed)
DBA1 = dual_barabasi_albert_graph(100, m1, m2, 1, seed)
assert_equal(BA1.size(), DBA1.size())
# This should be BA with m = m2
BA2 = barabasi_albert_graph(100, m2, seed)
DBA2 = dual_barabasi_albert_graph(100, m1, m2, 0, seed)
assert_equal(BA2.size(), DBA2.size())
# Testing exceptions
dbag = dual_barabasi_albert_graph
assert_raises(NetworkXError, dbag, m1, m1, m2, 0)
assert_raises(NetworkXError, dbag, m2, m1, m2, 0)
assert_raises(NetworkXError, dbag, 100, m1, m2, -0.5)
assert_raises(NetworkXError, dbag, 100, m1, m2, 1.5)
def test_extended_barabasi_albert(self, m=2):
"""
        Tests that the generated extended BA random graph behaves consistently.
        Tests that the exceptions are raised as expected.
        The graph generation is repeated several times to avoid lucky shots.
        """
seed = 42
repeats = 2
BA_model = barabasi_albert_graph(100, m, seed)
BA_model_edges = BA_model.number_of_edges()
while repeats:
repeats -= 1
# This behaves just like BA, the number of edges must be the same
G1 = extended_barabasi_albert_graph(100, m, 0, 0, seed)
assert_equal(G1.size(), BA_model_edges)
            # The graph should now have more than twice the original number of edges
G1 = extended_barabasi_albert_graph(100, m, 0.8, 0, seed)
assert_greater(G1.size(), BA_model_edges * 2)
            # Only edge rewiring, so the number of edges stays equal to the original
G2 = extended_barabasi_albert_graph(100, m, 0, 0.8, seed)
assert_equal(G2.size(), BA_model_edges)
            # Mixed scenario: fewer edges than G1 and more edges than G2
G3 = extended_barabasi_albert_graph(100, m, 0.3, 0.3, seed)
assert_greater(G3.size(), G2.size())
assert_less(G3.size(), G1.size())
# Testing exceptions
ebag = extended_barabasi_albert_graph
assert_raises(NetworkXError, ebag, m, m, 0, 0)
assert_raises(NetworkXError, ebag, 1, 0.5, 0, 0)
assert_raises(NetworkXError, ebag, 100, 2, 0.5, 0.5)
def test_random_zero_regular_graph(self):
"""Tests that a 0-regular graph has the correct number of nodes and
edges.
"""
seed = 42
G = random_regular_graph(0, 10, seed)
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 0)
def test_gnp(self):
for generator in [gnp_random_graph, binomial_graph, erdos_renyi_graph,
fast_gnp_random_graph]:
G = generator(10, -1.1)
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 0)
G = generator(10, 0.1)
assert_equal(len(G), 10)
G = generator(10, 0.1, seed=42)
assert_equal(len(G), 10)
G = generator(10, 1.1)
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 45)
G = generator(10, -1.1, directed=True)
assert_true(G.is_directed())
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 0)
G = generator(10, 0.1, directed=True)
assert_true(G.is_directed())
assert_equal(len(G), 10)
G = generator(10, 1.1, directed=True)
assert_true(G.is_directed())
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 90)
# assert that random graphs generate all edges for p close to 1
edges = 0
runs = 100
for i in range(runs):
edges += sum(1 for _ in generator(10, 0.99999, directed=True).edges())
assert_almost_equal(edges / float(runs), 90, delta=runs * 2.0 / 100)
def test_gnm(self):
G = gnm_random_graph(10, 3)
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 3)
G = gnm_random_graph(10, 3, seed=42)
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 3)
G = gnm_random_graph(10, 100)
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 45)
G = gnm_random_graph(10, 100, directed=True)
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 90)
G = gnm_random_graph(10, -1.1)
assert_equal(len(G), 10)
assert_equal(sum(1 for _ in G.edges()), 0)
def test_watts_strogatz_big_k(self):
assert_raises(NetworkXError, watts_strogatz_graph, 10, 10, 0.25)
assert_raises(NetworkXError, newman_watts_strogatz_graph, 10, 10, 0.25)
# could create an infinite loop, now doesn't
# infinite loop used to occur when a node has degree n-1 and needs to rewire
watts_strogatz_graph(10, 9, 0.25, seed=0)
newman_watts_strogatz_graph(10, 9, 0.5, seed=0)
def test_random_kernel_graph(self):
def integral(u, w, z):
return c * (z - w)
def root(u, w, r):
return r / c + w
c = 1
graph = random_kernel_graph(1000, integral, root)
graph = random_kernel_graph(1000, integral, root, seed=42)
assert_equal(len(graph), 1000)
|
ce5f105d8fdc1b90bf7fe24858628d413ffc4c1a
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/pytorch/source/torch/distributions/lowrank_multivariate_normal.py
|
019ac62f3bfcf3e466e66394574e12888e2abb32
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 10,135
|
py
|
lowrank_multivariate_normal.py
|
import math
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.multivariate_normal import (_batch_diag, _batch_mahalanobis, _batch_mv,
_batch_trtrs_lower)
from torch.distributions.utils import _standard_normal, lazy_property
def _batch_vector_diag(bvec):
"""
Returns the diagonal matrices of a batch of vectors.
"""
n = bvec.size(-1)
bmat = bvec.new_zeros(bvec.shape + (n,))
bmat.view(bvec.shape[:-1] + (-1,))[..., ::n + 1] = bvec
return bmat
def _batch_capacitance_tril(W, D):
r"""
Computes Cholesky of :math:`I + W.T @ inv(D) @ W` for a batch of matrices :math:`W`
and a batch of vectors :math:`D`.
"""
m = W.size(-1)
Wt_Dinv = W.transpose(-1, -2) / D.unsqueeze(-2)
K = torch.matmul(Wt_Dinv, W).contiguous()
K.view(-1, m * m)[:, ::m + 1] += 1 # add identity matrix to K
return torch.cholesky(K)
def _batch_lowrank_logdet(W, D, capacitance_tril):
r"""
Uses "matrix determinant lemma"::
log|W @ W.T + D| = log|C| + log|D|,
where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute
the log determinant.
"""
return 2 * _batch_diag(capacitance_tril).log().sum(-1) + D.log().sum(-1)
def _batch_lowrank_mahalanobis(W, D, x, capacitance_tril):
r"""
Uses "Woodbury matrix identity"::
inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D),
where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute the squared
Mahalanobis distance :math:`x.T @ inv(W @ W.T + D) @ x`.
"""
Wt_Dinv = W.transpose(-1, -2) / D.unsqueeze(-2)
Wt_Dinv_x = _batch_mv(Wt_Dinv, x)
mahalanobis_term1 = (x.pow(2) / D).sum(-1)
mahalanobis_term2 = _batch_mahalanobis(capacitance_tril, Wt_Dinv_x)
return mahalanobis_term1 - mahalanobis_term2
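# A minimal sanity-check sketch (illustrative, not part of the original module):
# for small unbatched inputs the low-rank helpers above should agree with dense
# computations on the full covariance W @ W.T + diag(D). It assumes the private
# helpers behave as documented and is meant to be run manually.
def _check_lowrank_identities(n=5, r=2):
    torch.manual_seed(0)
    W = torch.randn(n, r)
    D = torch.rand(n) + 0.5
    x = torch.randn(n)
    cov = W @ W.t() + torch.diag(D)
    tril = _batch_capacitance_tril(W, D)
    # Matrix determinant lemma: log|W @ W.T + D| = log|C| + log|D|
    assert torch.allclose(_batch_lowrank_logdet(W, D, tril), torch.logdet(cov), atol=1e-4)
    # Woodbury identity: x.T @ inv(W @ W.T + D) @ x without forming the full inverse
    dense_maha = x @ torch.inverse(cov) @ x
    assert torch.allclose(_batch_lowrank_mahalanobis(W, D, x, tril), dense_maha, atol=1e-4)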
class LowRankMultivariateNormal(Distribution):
r"""
Creates a multivariate normal distribution with covariance matrix having a low-rank form
parameterized by :attr:`cov_factor` and :attr:`cov_diag`::
covariance_matrix = cov_factor @ cov_factor.T + cov_diag
Example:
        >>> m = LowRankMultivariateNormal(torch.zeros(2), torch.tensor([[1.], [0.]]), torch.ones(2))
>>> m.sample() # normally distributed with mean=`[0,0]`, cov_factor=`[1,0]`, cov_diag=`[1,1]`
tensor([-0.2102, -0.5429])
Args:
loc (Tensor): mean of the distribution with shape `batch_shape + event_shape`
cov_factor (Tensor): factor part of low-rank form of covariance matrix with shape
`batch_shape + event_shape + (rank,)`
cov_diag (Tensor): diagonal part of low-rank form of covariance matrix with shape
`batch_shape + event_shape`
Note:
The computation for determinant and inverse of covariance matrix is avoided when
`cov_factor.shape[1] << cov_factor.shape[0]` thanks to `Woodbury matrix identity
<https://en.wikipedia.org/wiki/Woodbury_matrix_identity>`_ and
`matrix determinant lemma <https://en.wikipedia.org/wiki/Matrix_determinant_lemma>`_.
Thanks to these formulas, we just need to compute the determinant and inverse of
the small size "capacitance" matrix::
capacitance = I + cov_factor.T @ inv(cov_diag) @ cov_factor
"""
arg_constraints = {"loc": constraints.real,
"cov_factor": constraints.real,
"cov_diag": constraints.positive}
support = constraints.real
has_rsample = True
def __init__(self, loc, cov_factor, cov_diag, validate_args=None):
if loc.dim() < 1:
raise ValueError("loc must be at least one-dimensional.")
event_shape = loc.shape[-1:]
if cov_factor.dim() < 2:
raise ValueError("cov_factor must be at least two-dimensional, "
"with optional leading batch dimensions")
if cov_factor.shape[-2:-1] != event_shape:
raise ValueError("cov_factor must be a batch of matrices with shape {} x m"
.format(event_shape[0]))
if cov_diag.shape[-1:] != event_shape:
raise ValueError("cov_diag must be a batch of vectors with shape {}".format(event_shape))
loc_ = loc.unsqueeze(-1)
cov_diag_ = cov_diag.unsqueeze(-1)
try:
loc_, self.cov_factor, cov_diag_ = torch.broadcast_tensors(loc_, cov_factor, cov_diag_)
except RuntimeError:
raise ValueError("Incompatible batch shapes: loc {}, cov_factor {}, cov_diag {}"
.format(loc.shape, cov_factor.shape, cov_diag.shape))
self.loc = loc_[..., 0]
self.cov_diag = cov_diag_[..., 0]
batch_shape = self.loc.shape[:-1]
self._unbroadcasted_cov_factor = cov_factor
self._unbroadcasted_cov_diag = cov_diag
self._capacitance_tril = _batch_capacitance_tril(cov_factor, cov_diag)
super(LowRankMultivariateNormal, self).__init__(batch_shape, event_shape,
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LowRankMultivariateNormal, _instance)
batch_shape = torch.Size(batch_shape)
loc_shape = batch_shape + self.event_shape
new.loc = self.loc.expand(loc_shape)
new.cov_diag = self.cov_diag.expand(loc_shape)
new.cov_factor = self.cov_factor.expand(loc_shape + self.cov_factor.shape[-1:])
new._unbroadcasted_cov_factor = self._unbroadcasted_cov_factor
new._unbroadcasted_cov_diag = self._unbroadcasted_cov_diag
new._capacitance_tril = self._capacitance_tril
super(LowRankMultivariateNormal, new).__init__(batch_shape,
self.event_shape,
validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
return self.loc
@lazy_property
def variance(self):
return (self._unbroadcasted_cov_factor.pow(2).sum(-1)
+ self._unbroadcasted_cov_diag).expand(self._batch_shape + self._event_shape)
@lazy_property
def scale_tril(self):
        # The following identity is used to increase the numerical stability of the
        # Cholesky decomposition (see http://www.gaussianprocess.org/gpml/, Section 3.4.3):
        #     W @ W.T + D = D^(1/2) @ (I + D^(-1/2) @ W @ W.T @ D^(-1/2)) @ D^(1/2)
        # The matrix "I + D^(-1/2) @ W @ W.T @ D^(-1/2)" has eigenvalues bounded from below by 1,
# hence it is well-conditioned and safe to take Cholesky decomposition.
n = self._event_shape[0]
cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1)
Dinvsqrt_W = self._unbroadcasted_cov_factor / cov_diag_sqrt_unsqueeze
K = torch.matmul(Dinvsqrt_W, Dinvsqrt_W.transpose(-1, -2)).contiguous()
K.view(-1, n * n)[:, ::n + 1] += 1 # add identity matrix to K
scale_tril = cov_diag_sqrt_unsqueeze * torch.cholesky(K)
return scale_tril.expand(self._batch_shape + self._event_shape + self._event_shape)
@lazy_property
def covariance_matrix(self):
covariance_matrix = (torch.matmul(self._unbroadcasted_cov_factor,
self._unbroadcasted_cov_factor.transpose(-1, -2))
+ _batch_vector_diag(self._unbroadcasted_cov_diag))
return covariance_matrix.expand(self._batch_shape + self._event_shape +
self._event_shape)
@lazy_property
def precision_matrix(self):
# We use "Woodbury matrix identity" to take advantage of low rank form::
# inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D)
# where :math:`C` is the capacitance matrix.
Wt_Dinv = (self._unbroadcasted_cov_factor.transpose(-1, -2)
/ self._unbroadcasted_cov_diag.unsqueeze(-2))
A = _batch_trtrs_lower(Wt_Dinv, self._capacitance_tril)
precision_matrix = (_batch_vector_diag(self._unbroadcasted_cov_diag.reciprocal())
- torch.matmul(A.transpose(-1, -2), A))
return precision_matrix.expand(self._batch_shape + self._event_shape +
self._event_shape)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
W_shape = shape[:-1] + self.cov_factor.shape[-1:]
eps_W = _standard_normal(W_shape, dtype=self.loc.dtype, device=self.loc.device)
eps_D = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
return (self.loc + _batch_mv(self._unbroadcasted_cov_factor, eps_W)
+ self._unbroadcasted_cov_diag.sqrt() * eps_D)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
diff = value - self.loc
M = _batch_lowrank_mahalanobis(self._unbroadcasted_cov_factor,
self._unbroadcasted_cov_diag,
diff,
self._capacitance_tril)
log_det = _batch_lowrank_logdet(self._unbroadcasted_cov_factor,
self._unbroadcasted_cov_diag,
self._capacitance_tril)
return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + log_det + M)
def entropy(self):
log_det = _batch_lowrank_logdet(self._unbroadcasted_cov_factor,
self._unbroadcasted_cov_diag,
self._capacitance_tril)
H = 0.5 * (self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + log_det)
if len(self._batch_shape) == 0:
return H
else:
return H.expand(self._batch_shape)
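# Illustrative usage sketch (not part of the original module); the parameter values are
# hypothetical and only show the expected shapes for an event size of 3 and rank 2:
#   loc = torch.zeros(3)
#   cov_factor = torch.randn(3, 2)
#   cov_diag = torch.ones(3)
#   m = LowRankMultivariateNormal(loc, cov_factor, cov_diag)
#   x = m.rsample()       # reparameterized sample of shape (3,)
#   logp = m.log_prob(x)  # scalar log density, computed via the capacitance matrix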
|
5143c501ff6f5c24b7c00f590ce24fea7f957920
|
36977d5aba5592ec4ee2090d16958f90df11997d
|
/tests/multivariate/secret_key_agreement/test_necessary_intrinsic_mutual_information.py
|
9dd8082c6d0a12349bc16e246d70cb68062962a4
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
dit/dit
|
8589f969973b204fed828d1f8133f8c30de2cb6b
|
b13c5020a2b8524527a4a0db5a81d8549142228c
|
refs/heads/master
| 2023-08-31T03:58:57.651496
| 2023-08-30T21:55:54
| 2023-08-30T21:55:54
| 13,201,610
| 468
| 95
|
BSD-3-Clause
| 2023-08-29T03:54:31
| 2013-09-29T23:03:51
|
Python
|
UTF-8
|
Python
| false
| false
| 531
|
py
|
test_necessary_intrinsic_mutual_information.py
|
"""
Tests for dit.multivariate.secret_key_agreement.skar_lower_bounds
"""
import pytest
from dit.example_dists.intrinsic import *
from dit.multivariate import necessary_intrinsic_mutual_information
@pytest.mark.flaky(reruns=5)
@pytest.mark.parametrize('dist', [intrinsic_1, intrinsic_2, intrinsic_3])
def test_nimi_1(dist):
"""
Test against known values.
"""
nimi = necessary_intrinsic_mutual_information(dist, [[0], [1]], [2], bound_u=2, bound_v=4)
assert nimi == pytest.approx(dist.secret_rate, abs=1e-5)
|
582ca1b4b3cb80be2fb5257e91f40704538409a7
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-meta/amundsen/databuilder/tests/unit/rest_api/mode_analytics/test_mode_paginated_rest_api_query.py
|
c0f9d863a374c9a0e501484e118a6194bd03d903
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
test_mode_paginated_rest_api_query.py
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
import unittest
from mock import call, patch
from databuilder.rest_api.base_rest_api_query import RestApiQuerySeed
from databuilder.rest_api.mode_analytics.mode_paginated_rest_api_query import ModePaginatedRestApiQuery
logging.basicConfig(level=logging.INFO)
class TestModePaginatedRestApiQuery(unittest.TestCase):
def test_pagination(self) -> None:
seed_record = [{'foo1': 'bar1'},
{'foo2': 'bar2'}]
seed_query = RestApiQuerySeed(seed_record=seed_record)
with patch('databuilder.rest_api.rest_api_query.requests.get') as mock_get:
json_path = 'foo[*].name'
field_names = ['name_field']
            mock_get.return_value.json.side_effect = [  # need to duplicate because json() is called twice
{'foo': [{'name': 'v1'}, {'name': 'v2'}]},
{'foo': [{'name': 'v1'}, {'name': 'v2'}]},
{'foo': [{'name': 'v3'}]},
{'foo': [{'name': 'v3'}]},
{'foo': [{'name': 'v4'}, {'name': 'v5'}]},
{'foo': [{'name': 'v4'}, {'name': 'v5'}]},
{},
{}
]
query = ModePaginatedRestApiQuery(query_to_join=seed_query, url='foobar', params={},
json_path=json_path, field_names=field_names,
skip_no_result=True, pagination_json_path='foo[*]',
max_record_size=2)
expected_list = [
{'name_field': 'v1', 'foo1': 'bar1'},
{'name_field': 'v2', 'foo1': 'bar1'},
{'name_field': 'v3', 'foo1': 'bar1'},
{'name_field': 'v4', 'foo2': 'bar2'},
{'name_field': 'v5', 'foo2': 'bar2'}
]
for actual in query.execute():
self.assertDictEqual(actual, expected_list.pop(0))
self.assertEqual(mock_get.call_count, 4)
calls = [
call('foobar?page=1'),
call('foobar?page=2')
]
mock_get.assert_has_calls(calls, any_order=True)
def test_no_pagination(self) -> None:
seed_record = [{'foo1': 'bar1'},
{'foo2': 'bar2'},
{'foo3': 'bar3'}]
seed_query = RestApiQuerySeed(seed_record=seed_record)
with patch('databuilder.rest_api.rest_api_query.requests.get') as mock_get:
json_path = 'foo[*].name'
field_names = ['name_field']
            mock_get.return_value.json.side_effect = [  # need to duplicate because json() is called twice
{'foo': [{'name': 'v1'}, {'name': 'v2'}]},
{'foo': [{'name': 'v1'}, {'name': 'v2'}]},
{'foo': [{'name': 'v3'}]},
{'foo': [{'name': 'v3'}]},
{'foo': [{'name': 'v4'}, {'name': 'v5'}]},
{'foo': [{'name': 'v4'}, {'name': 'v5'}]},
]
query = ModePaginatedRestApiQuery(query_to_join=seed_query, url='foobar', params={},
json_path=json_path, field_names=field_names,
pagination_json_path='foo[*]',
max_record_size=3)
expected_list = [
{'name_field': 'v1', 'foo1': 'bar1'},
{'name_field': 'v2', 'foo1': 'bar1'},
{'name_field': 'v3', 'foo2': 'bar2'},
{'name_field': 'v4', 'foo3': 'bar3'},
{'name_field': 'v5', 'foo3': 'bar3'}
]
for actual in query.execute():
self.assertDictEqual(actual, expected_list.pop(0))
self.assertEqual(mock_get.call_count, 3)
calls = [
call('foobar?page=1')
]
mock_get.assert_has_calls(calls, any_order=True)
if __name__ == '__main__':
unittest.main()
|
4fa2c4ef59f2038f8b663b836223547c6aa094c4
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/ai/modelscope/modelscope/trainers/hooks/lr_scheduler_hook.py
|
ed018fefde4603f7ca902c8af72261e4ef2651fd
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 4,754
|
py
|
lr_scheduler_hook.py
|
# Copyright (c) Alibaba, Inc. and its affiliates.
from modelscope.metainfo import Hooks
from modelscope.trainers.lrscheduler.builder import build_lr_scheduler
from modelscope.utils.constant import LogKeys
from modelscope.utils.logger import get_logger
from modelscope.utils.torch_utils import is_master
from .builder import HOOKS
from .hook import Hook
from .priority import Priority
@HOOKS.register_module(module_name=Hooks.LrSchedulerHook)
class LrSchedulerHook(Hook):
"""Lr scheduler.
Args:
by_epoch (bool): Whether lr changes by epoch
warmup (dict): warm up config
"""
PRIORITY = Priority.VERY_HIGH
def __init__(self, by_epoch=True, warmup=None) -> None:
super().__init__()
self.by_epoch = by_epoch
self.warmup = warmup
self.warmup_lr_scheduler = None
def before_run(self, trainer):
if self.warmup is not None:
assert isinstance(self.warmup, dict) and 'type' in self.warmup
self.warmup_lr_scheduler = build_lr_scheduler(
cfg=self.warmup,
default_args={'base_scheduler': trainer.lr_scheduler})
def get_current_lr(self, trainer):
import torch
if isinstance(trainer.optimizer, torch.optim.Optimizer):
lr = [group['lr'] for group in trainer.optimizer.param_groups]
elif isinstance(trainer.optimizer, dict):
lr = dict()
for name, optim in trainer.optimizer.items():
lr[name] = [group['lr'] for group in optim.param_groups]
else:
raise RuntimeError(
'lr is not applicable because optimizer does not exist.')
return lr
def before_train_iter(self, trainer):
if not self.by_epoch and trainer.iter >= getattr(
trainer, 'cumulative_iters', 1):
if self.warmup_lr_scheduler is not None:
self.warmup_lr_scheduler.step()
else:
trainer.lr_scheduler.step()
trainer.log_buffer.output[LogKeys.LR] = self._get_log_lr(trainer)
def before_train_epoch(self, trainer):
trainer.log_buffer.output[LogKeys.LR] = self._get_log_lr(trainer)
def after_train_epoch(self, trainer):
if self.by_epoch:
if self.warmup_lr_scheduler is not None:
self.warmup_lr_scheduler.step()
else:
trainer.lr_scheduler.step()
def _get_log_lr(self, trainer):
cur_lr = self.get_current_lr(trainer)
# only record lr of the first param group
if isinstance(cur_lr, list):
lr = cur_lr[0]
else:
assert isinstance(cur_lr, dict)
lr = {}
for k, lr_ in cur_lr.items():
assert isinstance(lr_, list)
lr.update({k: lr_[0]})
return lr
@HOOKS.register_module(module_name=Hooks.PlateauLrSchedulerHook)
class PlateauLrSchedulerHook(LrSchedulerHook):
"""Lr scheduler hook for `ReduceLROnPlateau`.
Args:
metric_key (str): Metric key returned from `trainer.metric_values`,
get the value of metric key and pass it to `ReduceLROnPlateau.step`.
by_epoch (bool): Whether lr changes by epoch
warmup (dict): warm up config
"""
PRIORITY = Priority.LOW # should be after EvaluationHook
def __init__(self, metric_key, by_epoch=True, warmup=None) -> None:
super().__init__(by_epoch=by_epoch, warmup=warmup)
self.metric_key = metric_key
def before_run(self, trainer):
super().before_run(trainer)
if not hasattr(trainer, 'logger'):
self.logger = get_logger(__name__)
else:
self.logger = trainer.logger
def after_train_epoch(self, trainer):
        # adapt to the case where the evaluation interval is greater than 1
if trainer.metric_values is None:
if is_master():
self.logger.warning(
f'Current epoch {trainer.epoch} has no evaluation metric values, skip lr_scheduler.step() !'
)
return
metrics = trainer.metric_values[self.metric_key]
if self.by_epoch:
if self.warmup_lr_scheduler is not None:
self.warmup_lr_scheduler.step(metrics=metrics)
else:
trainer.lr_scheduler.step(metrics=metrics)
@HOOKS.register_module(module_name=Hooks.NoneLrSchedulerHook)
class NoneLrSchedulerHook(LrSchedulerHook):
PRIORITY = Priority.LOW # should be after EvaluationHook
def __init__(self, by_epoch=True, warmup=None) -> None:
super().__init__(by_epoch=by_epoch, warmup=warmup)
def before_run(self, trainer):
return
def after_train_epoch(self, trainer):
return
|
5b3b5cb1f1d9fb26329b6617318710375627b850
|
38c290c804501eff492f1fa7ee8abb00b46c70ef
|
/runtime/stdlib/ffi/bitmask.py
|
4f41ab6ec7b5bae09e988cae15e7250a243057de
|
[
"MIT"
] |
permissive
|
cheery/lever
|
8a9524387bf3bc511889fa9a48f89927fd0b78f2
|
6fa8cd6afec440b32232f87236b0457fb8bfb8b1
|
refs/heads/master
| 2021-01-25T22:09:13.829448
| 2018-05-14T00:09:17
| 2018-05-14T00:09:17
| 45,874,533
| 144
| 13
| null | 2017-10-27T03:04:05
| 2015-11-09T23:40:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,778
|
py
|
bitmask.py
|
from simple import Type
from space import *
class Bitmask(Type):
#__slots__ = ["basetype", "parametric", "size", "align", "constants", "multichoice"]
#__attrs__ = ["basetype", "parametric", "size", "align", "constants", "multichoice"]
def __init__(self, basetype, constants, multichoice):
assert isinstance(basetype, Type)
self.basetype = basetype
self.parameter = basetype.parameter
self.size = basetype.size
self.align = basetype.align
self.constants = constants
self.multichoice = multichoice
def cast_to_ffitype(self):
return self.basetype.cast_to_ffitype()
# TODO: add method_signature for this situation?
def call(self, argv):
return bitmask_call([self] + argv)
def load(self, offset, copy):
value = self.basetype.load(offset, copy)
if isinstance(value, Integer):
return BitmaskValue(self, value.value)
return value
def store(self, pool, offset, value):
value = Integer(to_bitmask_digit(self, value))
return self.basetype.store(pool, offset, value)
@signature(Bitmask, Object)
def bitmask_call(self, value):
return BitmaskValue(self, to_bitmask_digit(self, value))
class BitmaskValue(Object):
__slots__ = ["bitmask", "value"]
__attrs__ = ["bitmask", "value"]
def __init__(self, bitmask, value):
self.bitmask = bitmask
self.value = value
def contains(self, item):
value = o_to_constant(self.bitmask, item)
if self.bitmask.multichoice: # Not sure if logic matches what we expect.
return self.value & value == value
else: # We will see that later.
return self.value == value
def getattr(self, name):
if name == u"value":
return Integer(self.value)
value = to_constant(self.bitmask, name)
if self.bitmask.multichoice:
return boolean(self.value & value == value)
else:
return boolean(self.value == value)
def repr(self):
seq = []
if self.bitmask.multichoice:
cover = 0
for name, mask in self.bitmask.constants.iteritems():
if self.value & mask == mask:
seq.append(String(name))
cover |= mask
weirdbits = self.value ^ cover
if weirdbits != 0:
seq.append(Integer(weirdbits))
return List(seq).repr()
else:
for name, mask in self.bitmask.constants.iteritems():
if mask == self.value:
return String(name).repr()
return Integer(self.value).repr()
def to_bitmask_digit(bitmask, value):
if isinstance(value, String):
return to_constant(bitmask, value.string)
elif isinstance(value, BitmaskValue) and value.bitmask is bitmask:
return value.value
elif isinstance(value, Integer):
return value.value
elif bitmask.multichoice:
mask = 0
it = value.iter()
try:
while True:
item = it.callattr(u"next", [])
if isinstance(item, String):
mask |= to_constant(bitmask, item.string)
elif isinstance(item, Integer):
mask |= item.value
elif isinstance(item, BitmaskValue) and item.bitmask is bitmask:
mask |= item.value
else:
raise unwind(LTypeError(u"enum cannot handle: " + item.repr()))
except StopIteration as _:
pass
return mask
else:
raise unwind(LTypeError(u"enum cannot handle: " + value.repr()))
@operators.cmp_.multimethod_s(BitmaskValue, List)
def cmp_bitmask_list(value, seq):
cmp_mask = 0
for item in seq.contents:
cmp_mask |= o_to_constant(value.bitmask, item)
if value.value == cmp_mask:
return Integer(0)
if value.value & cmp_mask == cmp_mask:
return Integer(1)
else:
return Integer(-1)
@operators.cmp_.multimethod_s(List, BitmaskValue)
def cmp_list_bitmask(seq, value):
cmp_mask = 0
for item in seq.contents:
cmp_mask |= o_to_constant(value.bitmask, item)
if value.value == cmp_mask:
return Integer(0)
if value.value & cmp_mask == cmp_mask:
return Integer(-1)
else:
return Integer(1)
def o_to_constant(bitmask, obj):
if isinstance(obj, String):
return to_constant(bitmask, obj.string)
else:
return cast(obj, Integer, u"bitmask value").value
def to_constant(bitmask, string):
try:
return bitmask.constants[string]
except KeyError as _:
raise unwind(LKeyError(bitmask, String(string)))
|
6b6b3eac3d5cc0616991dd7ba2a266795ac1bdb1
|
4d28185e7a78a569f9a449f39f183cac3024f711
|
/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/TestVSCode_launch.py
|
dc7635289edbd0f0eccfed7198f3a252010ac6b2
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
apple/swift-lldb
|
2789bf44f648609a1674ee520ac20b64c95de072
|
d74be846ef3e62de946df343e8c234bde93a8912
|
refs/heads/stable
| 2023-04-06T00:28:15.882479
| 2019-10-25T22:46:59
| 2019-10-25T22:46:59
| 44,838,862
| 780
| 291
|
Apache-2.0
| 2020-01-10T19:28:43
| 2015-10-23T21:13:18
|
C++
|
UTF-8
|
Python
| false
| false
| 17,699
|
py
|
TestVSCode_launch.py
|
"""
Test lldb-vscode setBreakpoints request
"""
from __future__ import print_function
import unittest2
import vscode
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import lldbvscode_testcase
import os
class TestVSCode_launch(lldbvscode_testcase.VSCodeTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_default(self):
'''
Tests the default launch of a simple program. No arguments,
environment, or anything else is specified.
'''
program = self.getBuildArtifact("a.out")
self.build_and_launch(program)
self.continue_to_exit()
# Now get the STDOUT and verify our program argument is correct
output = self.get_stdout()
self.assertTrue(output and len(output) > 0,
"expect program output")
lines = output.splitlines()
self.assertTrue(program in lines[0],
"make sure program path is in first argument")
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_stopOnEntry(self):
'''
Tests the default launch of a simple program that stops at the
entry point instead of continuing.
'''
program = self.getBuildArtifact("a.out")
self.build_and_launch(program, stopOnEntry=True)
self.set_function_breakpoints(['main'])
stopped_events = self.continue_to_next_stop()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' in body:
reason = body['reason']
self.assertTrue(
reason != 'breakpoint',
'verify stop isn\'t "main" breakpoint')
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_cwd(self):
'''
Tests the default launch of a simple program with a current working
directory.
'''
program = self.getBuildArtifact("a.out")
program_parent_dir = os.path.realpath(
os.path.dirname(os.path.dirname(program)))
self.build_and_launch(program,
cwd=program_parent_dir)
self.continue_to_exit()
# Now get the STDOUT and verify our program argument is correct
output = self.get_stdout()
self.assertTrue(output and len(output) > 0,
"expect program output")
lines = output.splitlines()
found = False
for line in lines:
if line.startswith('cwd = \"'):
quote_path = '"%s"' % (program_parent_dir)
found = True
self.assertTrue(quote_path in line,
"working directory '%s' not in '%s'" % (
program_parent_dir, line))
self.assertTrue(found, "verified program working directory")
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_debuggerRoot(self):
'''
Tests the "debuggerRoot" will change the working directory of
the lldb-vscode debug adaptor.
'''
program = self.getBuildArtifact("a.out")
program_parent_dir = os.path.realpath(
os.path.dirname(os.path.dirname(program)))
commands = ['platform shell echo cwd = $PWD']
self.build_and_launch(program,
debuggerRoot=program_parent_dir,
initCommands=commands)
output = self.get_console()
self.assertTrue(output and len(output) > 0,
"expect console output")
lines = output.splitlines()
prefix = 'cwd = '
found = False
for line in lines:
if line.startswith(prefix):
found = True
self.assertTrue(program_parent_dir == line[len(prefix):],
"lldb-vscode working dir '%s' == '%s'" % (
program_parent_dir, line[6:]))
self.assertTrue(found, "verified lldb-vscode working directory")
self.continue_to_exit()
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_sourcePath(self):
'''
Tests the "sourcePath" will set the target.source-map.
'''
program = self.getBuildArtifact("a.out")
program_dir = os.path.dirname(program)
self.build_and_launch(program,
sourcePath=program_dir)
output = self.get_console()
self.assertTrue(output and len(output) > 0,
"expect console output")
lines = output.splitlines()
prefix = '(lldb) settings set target.source-map "." '
found = False
for line in lines:
if line.startswith(prefix):
found = True
quoted_path = '"%s"' % (program_dir)
self.assertTrue(quoted_path == line[len(prefix):],
"lldb-vscode working dir %s == %s" % (
quoted_path, line[6:]))
self.assertTrue(found, 'found "sourcePath" in console output')
self.continue_to_exit()
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_disableSTDIO(self):
'''
Tests the default launch of a simple program with STDIO disabled.
'''
program = self.getBuildArtifact("a.out")
self.build_and_launch(program,
disableSTDIO=True)
self.continue_to_exit()
# Now get the STDOUT and verify our program argument is correct
output = self.get_stdout()
self.assertTrue(output is None or len(output) == 0,
"expect no program output")
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@skipIfLinux # shell argument expansion doesn't seem to work on Linux
@expectedFailureNetBSD
@no_debug_info_test
def test_shellExpandArguments_enabled(self):
'''
Tests the default launch of a simple program with shell expansion
enabled.
'''
program = self.getBuildArtifact("a.out")
program_dir = os.path.dirname(program)
glob = os.path.join(program_dir, '*.out')
self.build_and_launch(program, args=[glob], shellExpandArguments=True)
self.continue_to_exit()
# Now get the STDOUT and verify our program argument is correct
output = self.get_stdout()
self.assertTrue(output and len(output) > 0,
"expect no program output")
lines = output.splitlines()
for line in lines:
quote_path = '"%s"' % (program)
if line.startswith("arg[1] ="):
self.assertTrue(quote_path in line,
'verify "%s" expanded to "%s"' % (
glob, program))
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_shellExpandArguments_disabled(self):
'''
Tests the default launch of a simple program with shell expansion
disabled.
'''
program = self.getBuildArtifact("a.out")
program_dir = os.path.dirname(program)
glob = os.path.join(program_dir, '*.out')
self.build_and_launch(program,
args=[glob],
shellExpandArguments=False)
self.continue_to_exit()
# Now get the STDOUT and verify our program argument is correct
output = self.get_stdout()
self.assertTrue(output and len(output) > 0,
"expect no program output")
lines = output.splitlines()
for line in lines:
quote_path = '"%s"' % (glob)
if line.startswith("arg[1] ="):
self.assertTrue(quote_path in line,
'verify "%s" stayed to "%s"' % (
glob, glob))
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_args(self):
'''
Tests launch of a simple program with arguments
'''
program = self.getBuildArtifact("a.out")
args = ["one", "with space", "'with single quotes'",
'"with double quotes"']
self.build_and_launch(program,
args=args)
self.continue_to_exit()
# Now get the STDOUT and verify our arguments got passed correctly
output = self.get_stdout()
self.assertTrue(output and len(output) > 0,
"expect program output")
lines = output.splitlines()
# Skip the first argument that contains the program name
lines.pop(0)
# Make sure arguments we specified are correct
for (i, arg) in enumerate(args):
quoted_arg = '"%s"' % (arg)
self.assertTrue(quoted_arg in lines[i],
'arg[%i] "%s" not in "%s"' % (i+1, quoted_arg, lines[i]))
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_environment(self):
'''
Tests launch of a simple program with environment variables
'''
program = self.getBuildArtifact("a.out")
env = ["NO_VALUE", "WITH_VALUE=BAR", "EMPTY_VALUE=",
"SPACE=Hello World"]
self.build_and_launch(program,
env=env)
self.continue_to_exit()
# Now get the STDOUT and verify our arguments got passed correctly
output = self.get_stdout()
self.assertTrue(output and len(output) > 0,
"expect program output")
lines = output.splitlines()
# Skip the all arguments so we have only environment vars left
while len(lines) and lines[0].startswith("arg["):
lines.pop(0)
# Make sure each environment variable in "env" is actually set in the
# program environment that was printed to STDOUT
for var in env:
found = False
for program_var in lines:
if var in program_var:
found = True
break
self.assertTrue(found,
'"%s" must exist in program environment (%s)' % (
var, lines))
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_commands(self):
'''
Tests the "initCommands", "preRunCommands", "stopCommands" and
"exitCommands" that can be passed during launch.
"initCommands" are a list of LLDB commands that get executed
        before the target is created.
"preRunCommands" are a list of LLDB commands that get executed
after the target has been created and before the launch.
"stopCommands" are a list of LLDB commands that get executed each
time the program stops.
"exitCommands" are a list of LLDB commands that get executed when
the process exits
'''
program = self.getBuildArtifact("a.out")
initCommands = ['target list', 'platform list']
preRunCommands = ['image list a.out', 'image dump sections a.out']
stopCommands = ['frame variable', 'bt']
exitCommands = ['expr 2+3', 'expr 3+4']
self.build_and_launch(program,
initCommands=initCommands,
preRunCommands=preRunCommands,
stopCommands=stopCommands,
exitCommands=exitCommands)
# Get output from the console. This should contain both the
# "initCommands" and the "preRunCommands".
output = self.get_console()
# Verify all "initCommands" were found in console output
self.verify_commands('initCommands', output, initCommands)
# Verify all "preRunCommands" were found in console output
self.verify_commands('preRunCommands', output, preRunCommands)
source = 'main.c'
first_line = line_number(source, '// breakpoint 1')
second_line = line_number(source, '// breakpoint 2')
lines = [first_line, second_line]
        # Set 2 breakpoints so we can verify that "stopCommands" get run as the
# breakpoints get hit
breakpoint_ids = self.set_source_breakpoints(source, lines)
self.assertTrue(len(breakpoint_ids) == len(lines),
"expect correct number of breakpoints")
# Continue after launch and hit the first breakpoint.
# Get output from the console. This should contain both the
# "stopCommands" that were run after the first breakpoint was hit
self.continue_to_breakpoints(breakpoint_ids)
output = self.get_console(timeout=1.0)
self.verify_commands('stopCommands', output, stopCommands)
# Continue again and hit the second breakpoint.
# Get output from the console. This should contain both the
# "stopCommands" that were run after the second breakpoint was hit
self.continue_to_breakpoints(breakpoint_ids)
output = self.get_console(timeout=1.0)
self.verify_commands('stopCommands', output, stopCommands)
# Continue until the program exits
self.continue_to_exit()
# Get output from the console. This should contain both the
# "exitCommands" that were run after the second breakpoint was hit
output = self.get_console(timeout=1.0)
self.verify_commands('exitCommands', output, exitCommands)
@skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
@no_debug_info_test
def test_extra_launch_commands(self):
'''
Tests the "luanchCommands" with extra launching settings
'''
self.build_and_create_debug_adaptor()
program = self.getBuildArtifact("a.out")
source = 'main.c'
first_line = line_number(source, '// breakpoint 1')
second_line = line_number(source, '// breakpoint 2')
        # Set target binary and 2 breakpoints
        # then we can verify the "launchCommands" get run
# also we can verify that "stopCommands" get run as the
# breakpoints get hit
launchCommands = [
'target create "%s"' % (program),
'br s -f main.c -l %d' % first_line,
'br s -f main.c -l %d' % second_line,
'run'
]
initCommands = ['target list', 'platform list']
preRunCommands = ['image list a.out', 'image dump sections a.out']
stopCommands = ['frame variable', 'bt']
exitCommands = ['expr 2+3', 'expr 3+4']
self.launch(program,
initCommands=initCommands,
preRunCommands=preRunCommands,
stopCommands=stopCommands,
exitCommands=exitCommands,
launchCommands=launchCommands)
# Get output from the console. This should contain both the
# "initCommands" and the "preRunCommands".
output = self.get_console()
# Verify all "initCommands" were found in console output
self.verify_commands('initCommands', output, initCommands)
# Verify all "preRunCommands" were found in console output
self.verify_commands('preRunCommands', output, preRunCommands)
# Verify all "launchCommands" were founc in console output
# After execution, program should launch
self.verify_commands('launchCommands', output, launchCommands)
# Verify the "stopCommands" here
self.continue_to_next_stop()
output = self.get_console(timeout=1.0)
self.verify_commands('stopCommands', output, stopCommands)
# Continue and hit the second breakpoint.
# Get output from the console. This should contain both the
# "stopCommands" that were run after the first breakpoint was hit
self.continue_to_next_stop()
output = self.get_console(timeout=1.0)
self.verify_commands('stopCommands', output, stopCommands)
# Continue until the program exits
self.continue_to_exit()
# Get output from the console. This should contain both the
# "exitCommands" that were run after the second breakpoint was hit
output = self.get_console(timeout=1.0)
self.verify_commands('exitCommands', output, exitCommands)
|
fae1bf9aae479db1b1506958fb78b3d7218413b0
|
e849872aed557595cec6ade1180e5f8bbbc29e27
|
/nilearn/_utils/path_finding.py
|
771758d35364c14dbcd75b068ac30335eda85431
|
[
"BSD-3-Clause"
] |
permissive
|
nilearn/nilearn
|
a071fe00af7d0c605f3030d38588ccfd00cdcb13
|
f0852e127b620a64af0a1ce02282106ce6f068ba
|
refs/heads/main
| 2023-08-31T02:21:22.794164
| 2023-08-31T01:44:59
| 2023-08-31T01:44:59
| 1,235,740
| 1,049
| 590
|
NOASSERTION
| 2023-09-14T21:33:17
| 2011-01-09T19:02:23
|
Python
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
path_finding.py
|
"""Path finding utilities."""
import glob
import os.path
from .helpers import stringify_path
def _resolve_globbing(path):
path = stringify_path(path)
if isinstance(path, str):
path_list = sorted(glob.glob(os.path.expanduser(path)))
# Raise an error in case the list is empty.
if len(path_list) == 0:
raise ValueError(f"No files matching path: {path}")
path = path_list
return path
|
2dad1855bc65c003007720aae0b111f0779e8fa8
|
f8d239baf08742caaafe9c8aac188e8d380450f4
|
/python/core/ltp_core/models/utils/transformer.py
|
8c5be9d9bdf27b61b2518f5292565f7cc7d12ef7
|
[] |
no_license
|
HIT-SCIR/ltp
|
3bef92308b7325d8b793438ac2f1591b8b552c2e
|
2aeeeec80d8d882f3a81d3e4e47286b92b44f129
|
refs/heads/main
| 2023-08-08T13:00:21.405340
| 2023-07-05T03:23:56
| 2023-07-05T03:23:56
| 1,832,321
| 4,661
| 1,087
| null | 2023-09-12T06:39:55
| 2011-06-01T15:10:15
|
Python
|
UTF-8
|
Python
| false
| false
| 168
|
py
|
transformer.py
|
def load_transformers(config):
from transformers import AutoConfig, AutoModel
config = AutoConfig.for_model(**config)
return AutoModel.from_config(config)
|
9b8f3da5407ab9f656a9b9d37f9da845ac8c2c4d
|
2e6b87dccfaf95eded0c26215f42b584cc0ce393
|
/tina/voxl/scale.py
|
d256b1b1d6e29570855325448287567cf4a44623
|
[
"MIT"
] |
permissive
|
taichi-dev/taichi_three
|
2d3c4022436777bbd6005a38f8cc27cd1f442430
|
62596cf36fba1c5a528796c51942ce44ed76292a
|
refs/heads/master
| 2023-06-20T19:59:09.218689
| 2021-07-10T09:32:09
| 2021-07-10T09:32:09
| 272,924,688
| 204
| 30
|
MIT
| 2021-07-10T09:32:10
| 2020-06-17T08:52:31
|
Python
|
UTF-8
|
Python
| false
| false
| 475
|
py
|
scale.py
|
from ..common import *
from .base import VoxlEditBase
class VolumeScale(VoxlEditBase):
def __init__(self, voxl, scale=1):
super().__init__(voxl)
self.scale = ti.field(float, ())
@ti.materialize_callback
def init_scale():
self.scale[None] = scale
def set_scale(self, scale):
self.scale[None] = scale
@ti.func
def sample_volume(self, pos):
return self.voxl.sample_volume(pos) * self.scale[None]
|
869a89ddc9a57399c1b293ab5d5bead97df6a922
|
a4d57ad8975f30bff8c324ad35eaec6c352fecd5
|
/tests/test_jinja_filters.py
|
9b716d01182156c80c75b83294818ddb481b1dba
|
[
"Apache-2.0"
] |
permissive
|
aio-libs/aiohttp-jinja2
|
4dab9dd456fc77f19cf54bb743aaa7f52b861e0e
|
8d2f554f5429d8a6db23385983fe8899c79aa1b0
|
refs/heads/master
| 2023-08-17T00:08:32.475589
| 2023-08-11T13:32:13
| 2023-08-11T13:32:13
| 28,298,614
| 197
| 59
|
Apache-2.0
| 2023-09-14T02:13:34
| 2014-12-21T12:17:21
|
Python
|
UTF-8
|
Python
| false
| false
| 623
|
py
|
test_jinja_filters.py
|
import jinja2
from aiohttp import web
import aiohttp_jinja2
async def test_jinja_filters(aiohttp_client):
@aiohttp_jinja2.template("tmpl.jinja2")
async def index(request):
return {}
def add_2(value):
return value + 2
app = web.Application()
aiohttp_jinja2.setup(
app,
loader=jinja2.DictLoader({"tmpl.jinja2": "{{ 5|add_2 }}"}),
filters={"add_2": add_2},
)
app.router.add_route("GET", "/", index)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 200 == resp.status
txt = await resp.text()
assert "7" == txt
|
38486fd23a06c733bbb63b74fbb90bf23b8f3405
|
48ee50316a950d9bc789ae843477b58b2913bf0d
|
/src/app/test/api/http/unit/handlers/v1/token_test.py
|
a609922189ac6e79caf4a7218b8f1bd82775d85e
|
[
"MIT"
] |
permissive
|
beer-garden/beer-garden
|
f6d1c305a261b59d3cb3389513fc3138004a8d07
|
a5fd2dcc2444409e243d3fdaa43d86695e5cb142
|
refs/heads/develop
| 2023-08-15T11:50:29.833953
| 2023-07-20T03:20:45
| 2023-07-20T03:20:45
| 120,045,001
| 254
| 38
|
MIT
| 2023-07-20T03:20:47
| 2018-02-03T00:13:29
|
Python
|
UTF-8
|
Python
| false
| false
| 7,856
|
py
|
token_test.py
|
# -*- coding: utf-8 -*-
import json
from datetime import datetime, timedelta, timezone
import jwt
import pytest
from tornado.httpclient import HTTPError
from beer_garden.api.http.authentication import issue_token_pair
from beer_garden.db.mongo.models import User, UserToken
@pytest.fixture
def user_password():
yield "supersecret"
@pytest.fixture
def user(user_password):
user = User(username="testuser")
user.set_password(user_password)
user.save()
yield user
user.delete()
class TestTokenAPI:
@pytest.mark.gen_test
def test_post_returns_token_on_valid_login(
self, http_client, app_config_auth_enabled, base_url, user, user_password
):
url = f"{base_url}/api/v1/token"
body = json.dumps({"username": user.username, "password": user_password})
response = yield http_client.fetch(url, method="POST", body=body)
response_body = json.loads(response.body.decode("utf-8"))
access_token = response_body["access"]
token_headers = jwt.get_unverified_header(access_token)
decoded_access_token = jwt.decode(
access_token,
key=app_config_auth_enabled.auth.token_secret,
algorithms=[token_headers["alg"]],
)
refresh_token = response_body["refresh"]
token_headers = jwt.get_unverified_header(refresh_token)
decoded_refresh_token = jwt.decode(
refresh_token,
key=app_config_auth_enabled.auth.token_secret,
algorithms=[token_headers["alg"]],
)
assert response.code == 200
assert decoded_access_token["sub"] == str(user.id)
assert decoded_access_token["jti"] == decoded_refresh_token["jti"]
@pytest.mark.gen_test
def test_post_returns_400_on_invalid_login(self, http_client, base_url, user):
url = f"{base_url}/api/v1/token"
body = json.dumps({"username": user.username, "password": "notmypassword"})
with pytest.raises(HTTPError) as excinfo:
yield http_client.fetch(url, method="POST", body=body)
assert excinfo.value.code == 400
@pytest.mark.gen_test
def test_post_returns_400_when_user_not_found(self, http_client, base_url):
url = f"{base_url}/api/v1/token"
body = json.dumps({"username": "cantfindme", "password": "doesntmatter"})
with pytest.raises(HTTPError) as excinfo:
yield http_client.fetch(url, method="POST", body=body)
assert excinfo.value.code == 400
class TestTokenRefreshAPI:
@pytest.mark.gen_test
def test_post_with_valid_refresh_token_returns_new_token_pair(
self, http_client, base_url, user, app_config_auth_enabled
):
url = f"{base_url}/api/v1/token/refresh"
expiration = datetime.now(tz=timezone.utc) + timedelta(days=1)
refresh_token = issue_token_pair(user, expiration)["refresh"]
body = json.dumps({"refresh": refresh_token})
response = yield http_client.fetch(url, method="POST", body=body)
response_body = json.loads(response.body.decode("utf-8"))
new_access_token = response_body["access"]
token_headers = jwt.get_unverified_header(new_access_token)
decoded_access_token = jwt.decode(
new_access_token,
key=app_config_auth_enabled.auth.token_secret,
algorithms=[token_headers["alg"]],
)
new_refresh_token = response_body["refresh"]
token_headers = jwt.get_unverified_header(new_refresh_token)
decoded_refresh_token = jwt.decode(
new_refresh_token,
key=app_config_auth_enabled.auth.token_secret,
algorithms=[token_headers["alg"]],
)
assert response.code == 200
assert decoded_access_token["sub"] == str(user.id)
assert decoded_access_token["jti"] == decoded_refresh_token["jti"]
# Milliseconds get lost during jwt.encode, so we just check the timedelta
assert (
expiration
- datetime.fromtimestamp(decoded_refresh_token["exp"], tz=timezone.utc)
) < timedelta(seconds=1)
@pytest.mark.gen_test
def test_post_with_expired_refresh_token_returns_400(
self, http_client, base_url, user
):
url = f"{base_url}/api/v1/token/refresh"
refresh_token = issue_token_pair(user, refresh_expiration=datetime.utcnow())[
"refresh"
]
body = json.dumps({"refresh": refresh_token})
with pytest.raises(HTTPError) as excinfo:
yield http_client.fetch(url, method="POST", body=body)
assert excinfo.value.code == 400
@pytest.mark.gen_test
def test_post_with_revoked_refresh_token_returns_400(
self, http_client, base_url, user
):
url = f"{base_url}/api/v1/token/refresh"
refresh_token = issue_token_pair(user, refresh_expiration=datetime.utcnow())[
"refresh"
]
body = json.dumps({"refresh": refresh_token})
UserToken.drop_collection()
with pytest.raises(HTTPError) as excinfo:
yield http_client.fetch(url, method="POST", body=body)
assert excinfo.value.code == 400
@pytest.mark.gen_test
def test_post_with_invalid_refresh_token_returns_400(
self, http_client, base_url, user
):
url = f"{base_url}/api/v1/token/refresh"
refresh_token = "notarealtoken"
body = json.dumps({"refresh": refresh_token})
with pytest.raises(HTTPError) as excinfo:
yield http_client.fetch(url, method="POST", body=body)
assert excinfo.value.code == 400
class TestTokenRevokeAPI:
@pytest.mark.gen_test
def test_post_with_valid_refresh_token_expires_token(
self, http_client, base_url, user, app_config_auth_enabled
):
url = f"{base_url}/api/v1/token/revoke"
refresh_token = issue_token_pair(user)["refresh"]
body = json.dumps({"refresh": refresh_token})
token_headers = jwt.get_unverified_header(refresh_token)
decoded_refresh_token = jwt.decode(
refresh_token,
key=app_config_auth_enabled.auth.token_secret,
algorithms=[token_headers["alg"]],
)
assert len(UserToken.objects.filter(uuid=decoded_refresh_token["jti"])) == 1
response = yield http_client.fetch(url, method="POST", body=body)
assert response.code == 204
assert len(UserToken.objects.filter(uuid=decoded_refresh_token["jti"])) == 0
@pytest.mark.gen_test
def test_post_with_expired_refresh_token_returns_204(
self, http_client, base_url, user
):
url = f"{base_url}/api/v1/token/revoke"
refresh_token = issue_token_pair(user, refresh_expiration=datetime.utcnow())[
"refresh"
]
body = json.dumps({"refresh": refresh_token})
response = yield http_client.fetch(url, method="POST", body=body)
assert response.code == 204
@pytest.mark.gen_test
def test_post_with_revoked_refresh_token_returns_204(
self, http_client, base_url, user
):
url = f"{base_url}/api/v1/token/revoke"
refresh_token = issue_token_pair(user)["refresh"]
body = json.dumps({"refresh": refresh_token})
UserToken.drop_collection()
response = yield http_client.fetch(url, method="POST", body=body)
assert response.code == 204
@pytest.mark.gen_test
def test_post_with_invalid_refresh_token_returns_400(
self, http_client, base_url, user
):
url = f"{base_url}/api/v1/token/revoke"
refresh_token = "notarealtoken"
body = json.dumps({"refresh": refresh_token})
with pytest.raises(HTTPError) as excinfo:
yield http_client.fetch(url, method="POST", body=body)
assert excinfo.value.code == 400
|
9fd509ca6dbac4afdd5e96d7dc88fae56785e85c
|
8e9db280e102f3c4c0d23be3848261aeb451fbde
|
/tilecloud/filter/optipng.py
|
eee770f637791e8f948c42908c98d5f0ae656009
|
[
"BSD-2-Clause"
] |
permissive
|
camptocamp/tilecloud
|
8678a291550253325974b98dbef4b3f09fc2a779
|
0bd8e53d74ad2bd7a7a7aa14795e194cef5dd19f
|
refs/heads/master
| 2023-09-01T16:54:55.755088
| 2023-08-29T13:35:15
| 2023-08-29T13:35:15
| 2,954,583
| 145
| 18
|
BSD-2-Clause
| 2023-09-04T15:46:24
| 2011-12-10T18:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 861
|
py
|
optipng.py
|
import os
from subprocess import call # nosec
from tempfile import NamedTemporaryFile
from tilecloud import Tile
class OptiPNG:
def __init__(self, options: list[str], arg0: str = "/usr/bin/optipng"):
self.args = [arg0, "-q"] + list(options)
def __call__(self, tile: Tile) -> Tile:
with NamedTemporaryFile(delete=False, suffix=".png") as ntf:
try:
assert tile.data is not None
ntf.write(tile.data)
ntf.close()
retcode = call(self.args + [ntf.name]) # nosec
if retcode == 0:
with open(ntf.name, "rb") as file:
tile.data = file.read()
finally:
try:
os.unlink(ntf.name)
except OSError:
pass
return tile
|
4f6cbb6ad956fa484217dc3484e327e375788a20
|
04282600d7a0860bc4cf5a54684a48464690be29
|
/pddlgym/downward_translate/greedy_join.py
|
d8919eaaddbe8c9629eefe442c146274e73e48b9
|
[
"MIT"
] |
permissive
|
tomsilver/pddlgym
|
1acf2b530a1a524939179cf1ed7505a529685792
|
ea2945d5c26b950325b05360801a69177f666174
|
refs/heads/master
| 2023-05-24T13:52:58.352216
| 2023-05-23T23:52:39
| 2023-05-23T23:52:39
| 227,614,241
| 148
| 47
|
MIT
| 2023-05-23T23:52:40
| 2019-12-12T13:32:29
|
PDDL
|
UTF-8
|
Python
| false
| false
| 4,361
|
py
|
greedy_join.py
|
import sys
from . import pddl
from . import pddl_to_prolog
class OccurrencesTracker:
"""Keeps track of the number of times each variable appears
in a list of symbolic atoms."""
def __init__(self, rule):
self.occurrences = {}
self.update(rule.effect, +1)
for cond in rule.conditions:
self.update(cond, +1)
def update(self, symatom, delta):
for var in symatom.args:
if var[0] == "?":
if var not in self.occurrences:
self.occurrences[var] = 0
self.occurrences[var] += delta
assert self.occurrences[var] >= 0
if not self.occurrences[var]:
del self.occurrences[var]
def variables(self):
return set(self.occurrences)
class CostMatrix:
def __init__(self, joinees):
self.joinees = []
self.cost_matrix = []
for joinee in joinees:
self.add_entry(joinee)
def add_entry(self, joinee):
new_row = [self.compute_join_cost(joinee, other) for other in self.joinees]
self.cost_matrix.append(new_row)
self.joinees.append(joinee)
def delete_entry(self, index):
for row in self.cost_matrix[index + 1:]:
del row[index]
del self.cost_matrix[index]
del self.joinees[index]
def find_min_pair(self):
assert len(self.joinees) >= 2
min_cost = (sys.maxsize, sys.maxsize)
for i, row in enumerate(self.cost_matrix):
for j, entry in enumerate(row):
if entry < min_cost:
min_cost = entry
left_index, right_index = i, j
return left_index, right_index
def remove_min_pair(self):
left_index, right_index = self.find_min_pair()
left, right = self.joinees[left_index], self.joinees[right_index]
assert left_index > right_index
self.delete_entry(left_index)
self.delete_entry(right_index)
return (left, right)
def compute_join_cost(self, left_joinee, right_joinee):
left_vars = pddl_to_prolog.get_variables([left_joinee])
right_vars = pddl_to_prolog.get_variables([right_joinee])
if len(left_vars) > len(right_vars):
left_vars, right_vars = right_vars, left_vars
common_vars = left_vars & right_vars
return (len(left_vars) - len(common_vars),
len(right_vars) - len(common_vars),
-len(common_vars))
def can_join(self):
return len(self.joinees) >= 2
class ResultList:
def __init__(self, rule, name_generator):
self.final_effect = rule.effect
self.result = []
self.name_generator = name_generator
def get_result(self):
self.result[-1].effect = self.final_effect
return self.result
def add_rule(self, type, conditions, effect_vars):
effect = pddl.Atom(next(self.name_generator), effect_vars)
rule = pddl_to_prolog.Rule(conditions, effect)
rule.type = type
self.result.append(rule)
return rule.effect
def greedy_join(rule, name_generator):
assert len(rule.conditions) >= 2
cost_matrix = CostMatrix(rule.conditions)
occurrences = OccurrencesTracker(rule)
result = ResultList(rule, name_generator)
while cost_matrix.can_join():
joinees = list(cost_matrix.remove_min_pair())
for joinee in joinees:
occurrences.update(joinee, -1)
common_vars = set(joinees[0].args) & set(joinees[1].args)
condition_vars = set(joinees[0].args) | set(joinees[1].args)
effect_vars = occurrences.variables() & condition_vars
for i, joinee in enumerate(joinees):
joinee_vars = set(joinee.args)
retained_vars = joinee_vars & (effect_vars | common_vars)
if retained_vars != joinee_vars:
joinees[i] = result.add_rule("project", [joinee], sorted(retained_vars))
joint_condition = result.add_rule("join", joinees, sorted(effect_vars))
cost_matrix.add_entry(joint_condition)
occurrences.update(joint_condition, +1)
# assert occurrences.variables() == set(rule.effect.args)
# for var in set(rule.effect.args):
# assert occurrences.occurrences[var] == 2 * rule.effect.args.count(var)
return result.get_result()
|
44bd27c8d0bc64a5e4f4f566bc4c59b13f6a00b4
|
5f1fafe92ff5d704c4da4c1b2204a94099eb2d7a
|
/bot_test.py
|
b2d8b729570bc6dd86da39b2d20de264d2b8c53e
|
[
"MIT"
] |
permissive
|
soxoj/telegram-bot-dumper
|
41204bdd0709afe7459a99fe7a7a37bc55de26a5
|
bfaf1c22ab0db6d514a00846d7bb4349a6963418
|
refs/heads/master
| 2023-08-17T01:37:53.791928
| 2022-04-24T15:13:12
| 2022-04-24T15:13:12
| 189,640,323
| 108
| 25
|
MIT
| 2023-04-14T15:10:05
| 2019-05-31T18:27:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
bot_test.py
|
import json
import shutil
import os
import pytest
from telethon.sync import TelegramClient
from dumper import *
bot = None
@pytest.mark.asyncio
async def test_dumper():
global bot
if os.path.exists('5080069482'):
shutil.rmtree('5080069482')
bot = await bot_auth(os.getenv('TEST_TOKEN'))
assert bot is not None
base_path = '5080069482'
await get_chat_history(bot, from_id=200, to_id=0, lookahead=0)
assert os.path.exists('5080069482') == True
bot_info = json.load(open('5080069482/bot.json'))
assert bot_info['bot'] == True
assert os.path.exists('5080069482/378410969') == True
assert os.path.exists('5080069482/660191274') == True
assert os.path.exists('5080069482/378410969/378410969_history.txt') == True
assert os.path.exists('5080069482/378410969/378410969.json') == True
assert os.path.exists('5080069482/378410969/1625262736758908858.jpg') == True
assert os.path.exists('5080069482/660191274/660191274_history.txt') == True
soxoj_info = json.load(open('5080069482/378410969/378410969.json'))
    assert soxoj_info['first_name'] == 'Soxoj'
soxoj_history = open('5080069482/378410969/378410969_history.txt').read()
assert soxoj_history == """[1][378410969][2021-12-05 13:59:58+00:00] /start
[2][378410969][2021-12-05 14:00:00+00:00] test
[3][378410969][2021-12-05 14:00:20+00:00] Document: media/02.jpeg
[4][378410969][2021-12-05 14:00:40+00:00] Photo: media/5287469397440576034.jpg
[8][378410969][2021-12-05 14:05:57+00:00] 123
[9][5080069482][2021-12-05 14:05:58+00:00] 123
"""
chat_history = open('5080069482/660191274/660191274_history.txt').read()
assert chat_history == """[5][378410969][2021-12-05 14:01:23+00:00] MessageActionChatCreate(title='Soxoj & Test Dumper Serjfios34', users=[378410969, 5080069482])
[6][378410969][2021-12-05 14:01:23+00:00] Photo of chat was changed: media/5289906529388049862.jpg
[7][378410969][2021-12-05 14:01:37+00:00] MessageActionChatDeleteUser(user_id=5080069482)
"""
@pytest.fixture(autouse=True)
@pytest.mark.asyncio
async def exit_pytest_first_failure():
yield
await bot.disconnect()
|
ab7931b837cb0e5e04996066c35e54018848fce6
|
b2fef77e77f77b6cfd83da4ec2f89cbe73330844
|
/tests/test_rand_elastic_3d.py
|
0ff3ef6129516e932c9f7b8b0c0fa62b4ae05aec
|
[
"Apache-2.0"
] |
permissive
|
Project-MONAI/MONAI
|
8ef2593cc5fd1cd16e13464f927fe563fe3f5bac
|
e48c3e2c741fa3fc705c4425d17ac4a5afac6c47
|
refs/heads/dev
| 2023-09-02T00:21:04.532596
| 2023-09-01T06:46:45
| 2023-09-01T06:46:45
| 214,485,001
| 4,805
| 996
|
Apache-2.0
| 2023-09-14T15:19:30
| 2019-10-11T16:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,762
|
py
|
test_rand_elastic_3d.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.data import MetaTensor, set_track_meta
from monai.transforms import Rand3DElastic
from tests.utils import TEST_NDARRAYS_ALL, assert_allclose
TESTS = []
for p in TEST_NDARRAYS_ALL:
for device in [None, "cpu", "cuda"] if torch.cuda.is_available() else [None, "cpu"]:
TESTS.append(
[
{
"magnitude_range": (0.3, 2.3),
"sigma_range": (1.0, 20.0),
"prob": 0.0,
"device": device,
"spatial_size": -1,
},
{"img": p(torch.arange(72).reshape((2, 3, 3, 4)))},
p(np.arange(72).reshape((2, 3, 3, 4))),
]
)
TESTS.append(
[
{"magnitude_range": (0.3, 2.3), "sigma_range": (1.0, 20.0), "prob": 0.0, "device": device},
{"img": p(torch.ones((2, 3, 3, 3))), "spatial_size": (2, 2, 2)},
p(np.ones((2, 2, 2, 2))),
]
)
TESTS.append(
[
{"magnitude_range": (0.3, 0.3), "sigma_range": (1.0, 2.0), "prob": 0.9, "device": device},
{"img": p(torch.arange(27).reshape((1, 3, 3, 3))), "spatial_size": (2, 2, 2)},
p(
np.array(
[
[
[[6.4939356, 7.50289], [9.518351, 10.522849]],
[[15.512375, 16.523542], [18.531467, 19.53646]],
]
]
)
),
]
)
TESTS.append(
[
{
"magnitude_range": (0.3, 0.3),
"sigma_range": (1.0, 2.0),
"prob": 0.9,
"rotate_range": [1, 1, 1],
"device": device,
"spatial_size": (2, 2, 2),
},
{"img": p(torch.arange(27).reshape((1, 3, 3, 3))), "mode": "bilinear"},
p(
np.array(
[
[
[[5.0069294, 9.463932], [9.287769, 13.739735]],
[[12.319424, 16.777205], [16.594296, 21.045748]],
]
]
)
),
]
)
class TestRand3DElastic(unittest.TestCase):
@parameterized.expand(TESTS)
def test_rand_3d_elastic(self, input_param, input_data, expected_val):
g = Rand3DElastic(**input_param)
set_track_meta(False)
g.set_random_state(123)
result = g(**input_data)
self.assertNotIsInstance(result, MetaTensor)
self.assertIsInstance(result, torch.Tensor)
set_track_meta(True)
g.set_random_state(123)
result = g(**input_data)
assert_allclose(result, expected_val, type_test=False, rtol=1e-1, atol=1e-1)
if __name__ == "__main__":
unittest.main()
|
4d9957576ad1ac1ab60d339e2d2eb179dc20a7ad
|
c364fdae67ad5298d03d14d442ef890233c45724
|
/pymel/tools/mel2py/__init__.py
|
b9691fa71c6a7a06e1454275ad3f9b274b07ffa1
|
[
"BSD-3-Clause"
] |
permissive
|
LumaPictures/pymel
|
952b376b1bf4d2cc99c3f99c6c6b4dbc35edd065
|
5fbe189fc0e0e1fdf056be2dd2ae63d26ca33ed5
|
refs/heads/master
| 2023-08-30T01:17:01.855520
| 2023-04-12T15:48:35
| 2023-04-12T15:48:35
| 404,345
| 388
| 128
|
NOASSERTION
| 2023-09-02T00:00:17
| 2009-12-07T19:53:19
|
Python
|
UTF-8
|
Python
| false
| false
| 22,173
|
py
|
__init__.py
|
"""
Convert mel code into python code.
==========================
Mel To Python Translator
==========================
Known Limitations
=================
array index assignment
----------------------
In mel, you can directly assign the value of any element in an array, and all intermediate elements will be
automatically filled. This is not the case in python: if the list index is out of range an IndexError will be
raised. I've added fixes for several common array assignment conventions:
append new element
~~~~~~~~~~~~~~~~~~
MEL::
string $strArray[];
$strArray[`size $strArray`] = "foo";
Python::
strArray = []
strArray.append("foo")
assignment relative to end of array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MEL::
strArray[`size $strArray`-3] = "foo";
Python::
strArray[-3] = "foo"
However, since the translator does not track the values of variables, it does not know whether any given index is out of
range or not. So, the following would raise a 'list assignment index out of range' error when converted to
python and would need to be manually fixed::
string $strArray[];
for ($i=0; $i<5; $i++)
$strArray[$i] = "foo"
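A straightforward manual fix (plain python, not translator output) is to grow the list as you go::
    strArray = []
    for i in range(5):
        if i >= len(strArray):
            strArray.append("foo")
        else:
            strArray[i] = "foo"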
for(init; condition; update)
----------------------------
The closest equivalent to this in python is something akin to::
for i in range(start, end):
...
In order for this type of for loop to be translated into a python for loop, it must meet several requirements:
1. the initialization, condition, and update expressions must not be empty.
not translatable::
for(; ; $i++) print $i;
2. there can be only one conditional expression.
not translatable::
for($i=0; $i<10, $j<20; $i++) print $i;
3. the variable which is being updated and tested in the condition (aka, the iterator) must exist alone on one
side of the conditional expression. this one is easy enough to fix, just do some algebra:
not translatable::
for($i=0; ($i-2)<10; $i++) print $i;
translatable::
for($i=0; $i<(10+2); $i++) print $i;
4. the iterator can appear only once in the update expression:
not translatable::
for($i=0; $i<10; $i++, $i+=2) print $i;
if these conditions are not met, the for loop will be converted into a while loop::
i=0
while 1:
if not ( (i - 2)<10 ):
break
print i
i+=1
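For comparison, a loop which does meet all of the requirements, such as::
    for($i=0; $i<10; $i++) print $i;
is translated into a python for loop along the lines of (illustrative; the exact output may differ)::
    for i in range(0, 10):
        print i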
Inconveniences
==============
Switch Statements
-----------------
Alas, switch statements are not supported by python. The translator will convert them into an if/elif/else statement.
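For example, a mel switch such as::
    switch ($mode) {
        case "a":
            doA();
            break;
        default:
            doOther();
    }
would come out roughly as (illustrative; the exact output may differ)::
    if mode == "a":
        doA()
    else:
        doOther()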
Global Variables
----------------
Global variables are not shared between mel and python. Two functions have been added to pymel for this purpose:
`pymel.core.language.getMelGlobal` and `pymel.core.language.setMelGlobal`. By default, the translator will convert mel global variables into python global
variables AND initialize them to the value of their corresponding mel global variable using `getMelGlobal()`. If your
python global variable does not need to be shared with other mel scripts, you can remove the get- and
setMelGlobal lines (for how to filter global variables, see below). However, if it does need to be shared, it is very
important that you manually add `setMelGlobal()` to update the variable in the mel environment before calling any mel
procedures that will use the global variable.
In order to hone the accuracy of the translation of global variables, you will find two dictionary parameters below --
`global_var_include_regex` and `global_var_exclude_regex` -- which you can use to set a regular expression string
to tell the translator which global variables to share with the mel environment (i.e. which will use the get and set
methods described above) and which to not. for instance, in my case, it is desirable for all of maya's global
variables to be initialized from their mel value but for our in-house variables not to be, since the latter are often
used to pass values within a single script. see below for the actual regular expressions used to accomplish this.
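For example, a translated script that shares an integer global `$gMyCounter` with mel might end up with lines
like these (a sketch only; the variable and procedure names are illustrative)::
    gMyCounter = getMelGlobal('int', 'gMyCounter')
    gMyCounter += 1
    setMelGlobal('int', 'gMyCounter', gMyCounter)
    mel.someProcThatReadsTheCounter()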
Comments
--------
Rules on where comments may be placed are stricter in python, so expect your comments to be shifted around a bit
after translation.
Formatting
----------
Much of the formatting of your original script will be lost. I apologize for this, but python is much more strict
about formatting than mel, so the conversion is infinitely simpler when the formatting is largely discarded
and reconstructed based on pythonic rules.
Solutions and Caveats
=====================
catch and catchQuiet
--------------------
There is no direct equivalent in python to the catch and catchQuiet commands, and they do not exist in maya.cmds, so i wrote two
python commands of the same name and put them into pymel. These are provided primarily for compatibility with
automatically translated scripts. try/except statements should be used instead of catch or catchQuiet if coding
from scratch.
for( $elem in $list )
---------------------
This variety of for loop has a direct syntactical equivalent in python. the only catch here is that maya.cmds
functions which are supposed to return lists, return None when there are no matches. life would be much simpler
if they returned empty lists instead. the solution currently lies in pymel, where i have begun
correcting all of these commands to return proper results. i've started with the obvious ones, but there
are many more that i need to fix. you'll know you hit the problem when you get this error: 'TypeError: iteration
over non-sequence'. just email me with commands that are giving you problems and i'll fix them as
quickly as i can.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from . import melparse
try:
from pymel.util.external.ply.lex import LexError
except ImportError:
from ply.lex import LexError
import pymel.util as util
import pymel.internal as internal
import pymel.internal.factories as _factories
import pymel
import pymel.core as pm
import os
if False:
from typing import *
log = internal.getLogger(__name__)
"""
This is a dictionary for custom remappings of mel procedures into python functions, classes, etc. If you are like me you probably have a
library of helper mel scripts to make your life a bit easier. you will probably find that python has a built-in equivalent for many of
these.
i've provided a few entries as examples to show you how to implement remappings in mel2py. the first procedure in the dictionary is
'firstElem', which simply returns the first element of a string array, useful when the first element of a command is all you need. as you
can see, the key in the dictionary is the procedure name, and the value is a pair of (mel return type, function). the function takes two inputs: a list of arguments to the
procedure being remapped, and a ply yacc token object, which you probably will not need to use. the function should return a string
representing the new command. also, note that the list of arguments will all be strings and will already be converted into their python
equivalents. in the case of 'firstElem', it will perform conversions like the following:
firstElem( ls(sl=1) ) --> ls(sl=1)[0]
firstElem( myListVar ) --> myListVar[0]
"""
custom_proc_remap = {
'firstElem': ('string', lambda args, t: '%s[0]' % (args[0])),
'firstFloatElem': ('float', lambda args, t: '%s[0]' % (args[0])),
'stringArrayAppend': ('string[]', lambda args, t: '%s + %s' % (args[0], args[1])),
'stringInArray': ('int', lambda args, t: '%s in %s' % (args[0], args[1])),
'stringInStringArray': ('int', lambda args, t: '%s in %s' % (args[0], args[1])),
'stringArrayPrefix': ('string[]', lambda args, t: '[ %s + x for x in %s ]' % (args[0], args[1])),
'stringArraySuffix': ('string[]', lambda args, t: '[ x + %s for x in %s ]' % (args[0], args[1])),
'addPad': ('string', lambda args, t: "'%0" + args[1] + "d' % " + args[0]),
'getRefFileFromObject': ('string', lambda args, t: '%s.referenceFile()' % (args[0]))
}
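# As an illustration (a hypothetical entry, not shipped with mel2py), a mel helper `maxElem` returning the
# largest float in an array could be remapped with:
#
#     custom_proc_remap['maxElem'] = ('float', lambda args, t: 'max(%s)' % args[0])
#
# which would turn `maxElem($vals)` into `max(vals)` in the translated code.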
# do not change the following line !!!
melparse.proc_remap.update(custom_proc_remap)
def resolvePath(melobj, recurse=False, exclude=(), melPathOnly=False, basePackage=''):
"""
if passed a directory, get all mel files in the directory
if passed a file, ensure it is a mel file
if passed a procedure name, find its file
Returns tuples of the form (moduleName, melfile).
"""
if basePackage is None:
basePackage = ''
files = []
recursedResults = []
filepath = util.path(melobj)
if filepath.isfile():
if filepath.ext == '.mel':
files = [filepath.truepath()]
else:
log.warning("File is not a mel script: %s" % (filepath))
files = []
elif filepath.isdir():
files = [f.truepath() for f in filepath.files('[a-zA-Z]*.mel')]
if recurse:
for dir in filepath.dirs():
recursedResults.extend(
resolvePath(dir,
recurse=recurse,
exclude=exclude,
melPathOnly=melPathOnly,
basePackage=basePackage + '.' + melparse.pythonizeName(dir.basename())))
# elif not filepath.exists():
else:
# see if it's a procedure that we can derive a path from
try:
info = pm.mel.whatIs(melobj).split(': ')[-1]
assert info != 'Unknown', "If providing a procedure or a short file name, ensure the appropriate script is sourced"
melfile = util.path(info)
files = [melfile.truepath()]
except Exception as msg:
log.warning("Could not determine mel script from input '%s': %s." % (filepath, msg))
if exclude:
for i, badFile in enumerate(exclude):
badFile = util.path(badFile).canonicalpath()
if badFile.isdir():
badFile = badFile + os.sep
exclude[i] = badFile
filteredFiles = []
for f in files:
fileGood = True
for badFile in exclude:
if f.samepath(badFile) \
or (badFile.isdir()
and f.canonicalpath().startswith(badFile)):
fileGood = False
if fileGood:
filteredFiles.append(f)
files = filteredFiles
if melPathOnly:
files = [x for x in files if fileOnMelPath(x)]
if basePackage and basePackage[-1] != '.':
basePackage = basePackage + '.'
return [(basePackage + melparse.getModuleBasename(x), x) for x in files] + recursedResults
def fileOnMelPath(file):
"""
Return True if this file is on the mel path.
"""
file = util.path(file)
info = pm.mel.whatIs(file.basename()).split(': ', 1)
if len(info) < 2:
# If there wasn't a ':' character, the result was probably 'Unknown, or something similar -
# anyway, not what we're looking for
return False
if info[0] not in ('Mel procedure found in', 'Script found in'):
return False
path = util.path(info[1])
return path.samepath(file)
def _updateCurrentModules(newResults):
currentModules = melparse.batchData.currentModules
for moduleName, melfile in newResults:
if not isinstance(melfile, pm.Path):
melfile = util.path(melfile)
if melfile in currentModules.values():
oldModule = currentModules.get_key(melfile)
if oldModule == moduleName:
continue
if moduleName.count('.') >= oldModule.count('.'):
continue
elif moduleName in currentModules:
raise RuntimeError('two mel files result in same python module name: %s, %s => %s' % (currentModules[moduleName], melfile, moduleName))
currentModules[moduleName] = melfile
def _makePackages():
# Maps from a package (in tuple form) to base directory
packages = {}
for moduleName, melfile in melparse.batchData.currentModules.items():
if moduleName.count('.') < 1:
continue
package = tuple(moduleName.split('.')[:-1])
if melparse.batchData.outputDir:
packages[package] = melparse.batchData.outputDir
else:
assert package == tuple(melfile.splitall()[-(len(package) + 1):-1]), \
"package %s did not match melfile %s directory structure" % ('.'.join(package), melfile)
packages[package] = util.path.joinpath(*(melfile.splitall()[:-(len(package) + 1)]))
for packageTuple, baseDir in packages.items():
if not baseDir.isdir():
baseDir.makedirs()
curDir = baseDir
for nextDir in packageTuple:
curDir = curDir / nextDir
if not curDir.isdir():
curDir.mkdir()
initFile = curDir / '__init__.py'
if not initFile.isfile():
initFile.touch()
def _getInputFiles(input, recurse=False, exclude=(), melPathOnly=False, basePackage=''):
"""
Returns tuples of the form (packageName, melfile)
"""
results = []
if not util.isIterable(input):
input = [input]
for f in input:
results.extend(
resolvePath(f,
recurse=recurse,
exclude=exclude,
melPathOnly=melPathOnly,
basePackage=basePackage))
return results
def melInfo(input):
# type: (str) -> Tuple[Iterable[str], dict, dict]
"""
Get information about procedures in a mel file.
>>> import pymel.tools.mel2py as mel2py
>>> mel2py.melInfo('attributeExists')
(['attributeExists'], {'attributeExists': {'returnType': 'int', 'args': [('string', '$attr'), ('string', '$node')]}}, {})
Parameters
----------
input : str
can be a mel file or a sourced mel procedure
Returns
-------
allProcs : Iterable[str]
The list of procedures in the order they are defined
globalProcs : dict
A dictionary of global procedures, with the following entries:
- returnType: mel type to be returned
- args: a list of (type, variable_name) pairs
localProcs : dict
A dictionary of local procedures, formatted the same as with globals
"""
# TODO: change this to use _getInputFiles, with an option to prevent recursing directories
res = resolvePath(input)
if len(res) != 1:
raise ValueError("input must be a mel script or a known procedure from a sourced mel script.")
f = res[0][1]
cbParser = melparse.MelScanner()
cbParser.build()
return cbParser.parse(f.bytes())
def mel2pyStr(data, currentModule=None, pymelNamespace='', forceCompatibility=False, verbosity=0, basePackage=None):
# type: (str, Optional[str], str, bool, int, Any) -> str
"""
convert a string representing mel code into a string representing python code
>>> import pymel.tools.mel2py as mel2py
>>> print(mel2py.mel2pyStr('paneLayout -e -configuration "top3" test;'))
from pymel.all import *
paneLayout('test',configuration="top3",e=1)
<BLANKLINE>
Note that when converting single lines, the lines must end in a semi-colon,
otherwise it is technically invalid syntax.
Parameters
----------
data : `str`
string representing the mel code to convert
currentModule : Optional[str]
the name of the module that the hypothetical code is executing in.
In most cases you will leave it at its default, the __main__ namespace.
pymelNamespace : `str`
the namespace into which pymel will be imported. the default is '',
which means ``from pymel.all import *``
forceCompatibility : `bool`
If True, the translator will attempt to use non-standard python types in order to produce
python code which more exactly reproduces the behavior of the original mel file, but which
will produce "uglier" code. Use this option if you wish to produce the most reliable code
without any manual cleanup.
verbosity : `int`
Set to non-zero for a *lot* of feedback
"""
mparser = melparse.MelParser()
mparser.build(currentModule, pymelNamespace=pymelNamespace, forceCompatibility=forceCompatibility, verbosity=verbosity)
results = mparser.parse(data)
# print mparser.lexer.global_procs
return results
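# Typical usage of the batch converter defined below (a sketch; the paths are illustrative):
#   import pymel.tools.mel2py as mel2py
#   mel2py.mel2py('~/maya/scripts', outputDir='~/maya/pyscripts', recurse=True)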
def mel2py(input, outputDir=None,
pymelNamespace='', forceCompatibility=False,
verbosity=0, test=False,
recurse=False, exclude=(), melPathOnly=False,
basePackage=None):
# type: (Any, Optional[str], str, bool, int, bool, bool, Iterable[str], bool, Optional[str]) -> None
"""
Batch convert an entire directory
Parameters
----------
input
May be a directory, a list of directories, the name of a mel file, a
list of mel files, or the name of a sourced procedure.
If only the name of the mel file is passed, mel2py will attempt to
determine the location of the file using the 'whatIs' mel command,
which relies on the script already being sourced by maya.
outputDir : Optional[str]
Directory where resulting python files will be written to
pymelNamespace : `str`
the namespace into which pymel will be imported. the default is '',
which means ``from pymel.all import *``
forceCompatibility : `bool`
If True, the translator will attempt to use non-standard python types in order to produce
python code which more exactly reproduces the behavior of the original mel file, but which
will produce "uglier" code. Use this option if you wish to produce the most reliable code
without any manual cleanup.
verbosity : `int`
Set to non-zero for a *lot* of feedback
test : `bool`
After translation, attempt to import the modules to test for errors
recurse : `bool`
If the input is a directory, whether or not to recursively search subdirectories as well.
Subdirectories will be converted into packages, and any mel files within those subdirectories
will be submodules of that package.
exclude : Iterable[str]
A list of files/directories to exclude from processing, if input is a directory.
melPathOnly : `bool`
If true, will only translate mel files found on the mel script path.
basePackage : Optional[str]
Gives the package that all translated modules will be a part of; if None or an empty string, all
translated modules are assumed to have no base package.
"""
if basePackage is None:
basePackage = ''
melparse.batchData = melparse.BatchData()
batchData = melparse.batchData
batchData.basePackage = basePackage
if outputDir is not None:
outputDir = util.path(outputDir)
batchData.outputDir = outputDir
if outputDir and not os.path.exists(outputDir):
os.makedirs(outputDir)
currentFiles = _getInputFiles(input, recurse=recurse, exclude=exclude, melPathOnly=melPathOnly, basePackage=basePackage)
if not currentFiles:
raise ValueError("Could not find any scripts to operate on. Please pass a directory, a list of directories, the name of a mel file, a list of mel files, or the name of a sourced procedure")
_updateCurrentModules(currentFiles)
_makePackages()
importCnt = 0
succeeded = []
for moduleName, melfile in batchData.currentModules.items():
print(melfile, moduleName)
if melfile in batchData.scriptPath_to_moduleText:
print("Using pre-converted mel script", melfile)
converted = batchData.scriptPath_to_moduleText[melfile]
else:
data = melfile.bytes()
print("Converting mel script", melfile)
try:
converted = mel2pyStr(data, moduleName, pymelNamespace=pymelNamespace, verbosity=verbosity)
except melparse.MelParseError as e:
if e.file is None:
e.file = melfile
raise
header = """%s from mel file:
# %s
""" % (melparse.tag, melfile)
converted = header + converted
splitModule = moduleName.split('.')
if outputDir is None:
currOutDir = melfile.parent
else:
currOutDir = outputDir
if len(splitModule) > 1:
currOutDir = currOutDir.joinpath(*splitModule[:-1])
pyfile = currOutDir.joinpath(splitModule[-1] + '.py')
print("Writing converted python script: %s" % pyfile)
pyfile.write_bytes(converted)
succeeded.append(pyfile)
# except (ValueError, IndexError, TypeError, LexError), msg:
# if ignoreErrors:
# print 'failed:', msg
# else:
# raise Exception, msg
#
if test:
for pyfile in succeeded:
print("Testing", pyfile)
try:
__import__(pyfile.namebase)
except (SyntaxError, IndentationError) as msg:
print('A syntax error exists in this file that will need to be manually fixed: %s' % msg)
except RuntimeError as msg:
print('This file has code which executed on import and failed: %s' % msg)
except ImportError as msg:
print('%s' % msg)
except Exception as msg:
print('This file has code which executed on import and failed: %s' % msg)
else:
importCnt += 1
succCnt = len(succeeded)
print("%d total processed for conversion" % len(batchData.currentModules))
print("%d files succeeded" % succCnt)
print("%d files failed" % (len(batchData.currentModules) - succCnt))
if test:
print("%d files imported without error" % (importCnt))
succCnt = 0
|
e6a18bb9ea61ae2c0319be20957e63ded2e7b665
|
6f509fd95f182099f5447b6e597a03eedb9bb408
|
/setup.py
|
24abab1f559a890d6f6e6268e39a08dd050bfde5
|
[
"MIT"
] |
permissive
|
trek10inc/awsume
|
596e15aa74f0e896430bb6e383ac12e87ae62aa7
|
c9062cb6bd0d3067ba61558c445a92db0fde6e3a
|
refs/heads/master
| 2023-08-23T20:43:39.104563
| 2023-08-18T20:24:45
| 2023-08-18T20:24:45
| 55,160,220
| 790
| 105
|
MIT
| 2023-09-12T20:58:04
| 2016-03-31T15:12:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
setup.py
|
import fastentrypoints
from setuptools import setup, find_packages
import awsume
from awsume.configure.post_install import CustomInstall
setup(
name=awsume.__NAME__,
packages=find_packages(),
version=awsume.__VERSION__,
author=awsume.__AUTHOR__,
author_email=awsume.__AUTHOR_EMAIL__,
description=awsume.__DESCRIPTION__,
long_description=open('README.md', 'r').read(),
long_description_content_type='text/markdown',
license=awsume.__LICENSE__,
url=awsume.__HOMEPAGE__,
install_requires=[
'colorama',
'boto3',
'psutil',
'pluggy',
'pyyaml',
],
extras_require={
'saml': ['xmltodict'],
'fuzzy': ['python-levenshtein'],
'console': ['awsume-console-plugin'],
},
scripts=[
'shell_scripts/awsume',
'shell_scripts/awsume.ps1',
'shell_scripts/awsume.bat',
'shell_scripts/awsume.fish',
],
entry_points={
'console_scripts': [
'awsumepy=awsume.awsumepy.main:main',
'autoawsume=awsume.autoawsume.main:main',
'awsume-configure=awsume.configure.main:main',
'awsume-autocomplete=awsume_autocomplete:main',
],
},
python_requires='>=3.5',
cmdclass={
'install': CustomInstall,
},
)
|
030f561c89bd3b9ab027a5bb6fded99f74cc9b0a
|
572afc77a246acb9483b47fc9e1839f47005d736
|
/python/federatedml/nn/backend/utils/deepspeed_util.py
|
8cadcfb89417fce80eac6bccd6bf8b2632fdda93
|
[
"Apache-2.0"
] |
permissive
|
FederatedAI/FATE
|
7c787c308cca9ff46f287d24569c68de0a1cac07
|
8767db5ec0cb93784f64b290bc39b7b545c530fb
|
refs/heads/master
| 2023-08-17T10:13:00.302529
| 2023-06-14T07:01:38
| 2023-06-14T07:01:38
| 167,349,656
| 4,942
| 1,571
|
Apache-2.0
| 2023-09-14T07:02:29
| 2019-01-24T10:32:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
deepspeed_util.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
import deepspeed
except ModuleNotFoundError:
from federatedml.util import LOGGER
LOGGER.warning("Try to Import DeepSpeed ERROR, Will Not Support Using DeepSpeed")
def deepspeed_init(model, ds_config):
deepspeed.init_distributed()
model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
model, optimizer, _, _ = deepspeed.initialize(model=model,
model_parameters=model_parameters,
config=ds_config)
return model, optimizer
def is_zero3(ds_config):
return ds_config.get("zero_optimization", {}).get("stage", -1) == 3
def init_deepspeed_env(ds_config):
"""
To enable DeepSpeed ZeRO stage 3, this should be called first.
"""
if is_zero3(ds_config):
from transformers.deepspeed import HfDeepSpeedConfig
HfDeepSpeedConfig(ds_config)
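# A minimal usage sketch (illustrative; `ds_config` is a DeepSpeed config dict and `MyModel` a torch.nn.Module
# subclass, neither of which is defined here):
#
#   init_deepspeed_env(ds_config)  # for ZeRO stage 3, call before the model is built
#   model = MyModel()
#   model, optimizer = deepspeed_init(model, ds_config)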
def gather_model(model):
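    # Unwrap any nested engine/DDP wrappers via .module, then materialize the full weights of a ZeRO-3
    # partitioned model by calling the all_gather() hook that DeepSpeed attaches to each parameter.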
while hasattr(model, "module"):
model = model.module
for _, p in model.named_parameters():
p.all_gather()
|
a313b0b68b9302588356f550d6050194d459ce36
|
bb71b5b3ef0e6eb5cfd27e943e206f40cd0aeb90
|
/azurelinuxagent/common/cgroupapi.py
|
ca0ef3bb5b2e2781795679080aec684081aae90a
|
[
"Apache-2.0"
] |
permissive
|
Azure/WALinuxAgent
|
c35af1df7b52e3e9621757fe7992f3fa4c7c8c49
|
28345a55f9b21dae89472111635fd6e41809d958
|
refs/heads/master
| 2023-08-24T22:08:56.646723
| 2023-07-27T00:44:46
| 2023-07-27T00:44:46
| 4,576,639
| 473
| 436
|
Apache-2.0
| 2023-09-14T20:11:34
| 2012-06-06T18:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 16,950
|
py
|
cgroupapi.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
import os
import re
import shutil
import subprocess
import threading
import uuid
from azurelinuxagent.common import logger
from azurelinuxagent.common.cgroup import CpuCgroup, MemoryCgroup
from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.conf import get_agent_pid_file_path
from azurelinuxagent.common.exception import CGroupsException, ExtensionErrorCodes, ExtensionError, \
ExtensionOperationError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import systemd
from azurelinuxagent.common.utils import fileutil, shellutil
from azurelinuxagent.common.utils.extensionprocessutil import handle_process_completion, read_output, \
TELEMETRY_MESSAGE_MAX_LEN
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from azurelinuxagent.common.version import get_distro
CGROUPS_FILE_SYSTEM_ROOT = '/sys/fs/cgroup'
CGROUP_CONTROLLERS = ["cpu", "memory"]
EXTENSION_SLICE_PREFIX = "azure-vmextensions"
class SystemdRunError(CGroupsException):
"""
Raised when systemd-run fails
"""
def __init__(self, msg=None):
super(SystemdRunError, self).__init__(msg)
class CGroupsApi(object):
@staticmethod
def cgroups_supported():
distro_info = get_distro()
distro_name = distro_info[0]
try:
distro_version = FlexibleVersion(distro_info[1])
except ValueError:
return False
return distro_name.lower() == 'ubuntu' and distro_version.major >= 16
@staticmethod
def track_cgroups(extension_cgroups):
try:
for cgroup in extension_cgroups:
CGroupsTelemetry.track_cgroup(cgroup)
except Exception as exception:
logger.warn("Cannot add cgroup '{0}' to tracking list; resource usage will not be tracked. "
"Error: {1}".format(cgroup.path, ustr(exception)))
@staticmethod
def get_processes_in_cgroup(cgroup_path):
with open(os.path.join(cgroup_path, "cgroup.procs"), "r") as cgroup_procs:
return [int(pid) for pid in cgroup_procs.read().split()]
@staticmethod
def _foreach_legacy_cgroup(operation):
"""
Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent;
starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. Also,
when running under systemd, the PIDs should not be explicitly moved to the cgroup filesystem. The older daemons would
incorrectly do that under certain conditions.
This method checks for the existence of the legacy cgroups and, if the daemon's PID has been added to them, executes the
given operation on the cgroups. After this check, the method attempts to remove the legacy cgroups.
:param operation:
The function to execute on each legacy cgroup. It must take 2 arguments: the controller and the daemon's PID
"""
legacy_cgroups = []
for controller in ['cpu', 'memory']:
cgroup = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, "WALinuxAgent", "WALinuxAgent")
if os.path.exists(cgroup):
logger.info('Found legacy cgroup {0}', cgroup)
legacy_cgroups.append((controller, cgroup))
try:
for controller, cgroup in legacy_cgroups:
procs_file = os.path.join(cgroup, "cgroup.procs")
if os.path.exists(procs_file):
procs_file_contents = fileutil.read_file(procs_file).strip()
daemon_pid = CGroupsApi.get_daemon_pid()
if ustr(daemon_pid) in procs_file_contents:
operation(controller, daemon_pid)
finally:
for _, cgroup in legacy_cgroups:
logger.info('Removing {0}', cgroup)
shutil.rmtree(cgroup, ignore_errors=True)
return len(legacy_cgroups)
@staticmethod
def get_daemon_pid():
return int(fileutil.read_file(get_agent_pid_file_path()).strip())
class SystemdCgroupsApi(CGroupsApi):
"""
Cgroups interface via systemd
"""
def __init__(self):
self._cgroup_mountpoints = None
self._agent_unit_name = None
self._systemd_run_commands = []
self._systemd_run_commands_lock = threading.RLock()
def get_systemd_run_commands(self):
"""
Returns a list of the systemd-run commands currently running (given as PIDs)
"""
with self._systemd_run_commands_lock:
return self._systemd_run_commands[:]
def get_cgroup_mount_points(self):
"""
Returns a tuple with the mount points for the cpu and memory controllers; the values can be None
if the corresponding controller is not mounted
"""
# the output of mount is similar to
# $ mount -t cgroup
# cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
# cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct)
# cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
# etc
#
if self._cgroup_mountpoints is None:
cpu = None
memory = None
for line in shellutil.run_command(['mount', '-t', 'cgroup']).splitlines():
match = re.search(r'on\s+(?P<path>/\S+(memory|cpuacct))\s', line)
if match is not None:
path = match.group('path')
if 'cpuacct' in path:
cpu = path
else:
memory = path
self._cgroup_mountpoints = {'cpu': cpu, 'memory': memory}
return self._cgroup_mountpoints['cpu'], self._cgroup_mountpoints['memory']
@staticmethod
def get_process_cgroup_relative_paths(process_id):
"""
Returns a tuple with the path of the cpu and memory cgroups for the given process (relative to the mount point of the corresponding
controller).
The 'process_id' can be a numeric PID or the string "self" for the current process.
The values returned can be None if the process is not in a cgroup for that controller (e.g. the controller is not mounted).
"""
# The contents of the file are similar to
# # cat /proc/1218/cgroup
# 10:memory:/system.slice/walinuxagent.service
# 3:cpu,cpuacct:/system.slice/walinuxagent.service
# etc
cpu_path = None
memory_path = None
for line in fileutil.read_file("/proc/{0}/cgroup".format(process_id)).splitlines():
match = re.match(r'\d+:(?P<controller>(memory|.*cpuacct.*)):(?P<path>.+)', line)
if match is not None:
controller = match.group('controller')
path = match.group('path').lstrip('/') if match.group('path') != '/' else None
if controller == 'memory':
memory_path = path
else:
cpu_path = path
return cpu_path, memory_path
def get_process_cgroup_paths(self, process_id):
"""
Returns a tuple with the path of the cpu and memory cgroups for the given process. The 'process_id' can be a numeric PID or the string "self" for the current process.
The values returned can be None if the process is not in a cgroup for that controller (e.g. the controller is not mounted).
"""
cpu_cgroup_relative_path, memory_cgroup_relative_path = self.get_process_cgroup_relative_paths(process_id)
cpu_mount_point, memory_mount_point = self.get_cgroup_mount_points()
cpu_cgroup_path = os.path.join(cpu_mount_point, cpu_cgroup_relative_path) \
if cpu_mount_point is not None and cpu_cgroup_relative_path is not None else None
memory_cgroup_path = os.path.join(memory_mount_point, memory_cgroup_relative_path) \
if memory_mount_point is not None and memory_cgroup_relative_path is not None else None
return cpu_cgroup_path, memory_cgroup_path
def get_unit_cgroup_paths(self, unit_name):
"""
Returns a tuple with the path of the cpu and memory cgroups for the given unit.
The values returned can be None if the controller is not mounted.
Ex: ControlGroup=/azure.slice/walinuxagent.service
controlgroup_path[1:] = azure.slice/walinuxagent.service
"""
controlgroup_path = systemd.get_unit_property(unit_name, "ControlGroup")
cpu_mount_point, memory_mount_point = self.get_cgroup_mount_points()
cpu_cgroup_path = os.path.join(cpu_mount_point, controlgroup_path[1:]) \
if cpu_mount_point is not None else None
memory_cgroup_path = os.path.join(memory_mount_point, controlgroup_path[1:]) \
if memory_mount_point is not None else None
return cpu_cgroup_path, memory_cgroup_path
@staticmethod
def get_cgroup2_controllers():
"""
Returns a tuple with the mount point for the cgroups v2 controllers, and the currently mounted controllers;
either value can be None if cgroups v2 or its controllers are not mounted
"""
# the output of mount is similar to
# $ mount -t cgroup2
# cgroup2 on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime,nsdelegate)
#
for line in shellutil.run_command(['mount', '-t', 'cgroup2']).splitlines():
match = re.search(r'on\s+(?P<path>/\S+)\s', line)
if match is not None:
mount_point = match.group('path')
controllers = None
controllers_file = os.path.join(mount_point, 'cgroup.controllers')
if os.path.exists(controllers_file):
controllers = fileutil.read_file(controllers_file)
return mount_point, controllers
return None, None
@staticmethod
def _is_systemd_failure(scope_name, stderr):
stderr.seek(0)
stderr = ustr(stderr.read(TELEMETRY_MESSAGE_MAX_LEN), encoding='utf-8', errors='backslashreplace')
unit_not_found = "Unit {0} not found.".format(scope_name)
return unit_not_found in stderr or scope_name not in stderr
@staticmethod
def get_extension_slice_name(extension_name, old_slice=False):
# The old slice naming makes it difficult for users to override the limits, because they would need to place drop-in files on every upgrade if the extension slice name differs for each version.
# old slice includes <HandlerName>.<ExtensionName>-<HandlerVersion>
# new slice without version <HandlerName>.<ExtensionName>
if not old_slice:
extension_name = extension_name.rsplit("-", 1)[0]
# Since '-' is used as a separator in systemd unit names, we replace it with '_' to prevent side-effects.
return EXTENSION_SLICE_PREFIX + "-" + extension_name.replace('-', '_') + ".slice"
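    # For example (the extension name is illustrative), "Microsoft.CPlat.Core.RunCommandLinux-1.0.3" maps to
    # "azure-vmextensions-Microsoft.CPlat.Core.RunCommandLinux.slice" with the default naming, and to
    # "azure-vmextensions-Microsoft.CPlat.Core.RunCommandLinux_1.0.3.slice" when old_slice=True.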
def start_extension_command(self, extension_name, command, cmd_name, timeout, shell, cwd, env, stdout, stderr,
error_code=ExtensionErrorCodes.PluginUnknownFailure):
scope = "{0}_{1}".format(cmd_name, uuid.uuid4())
extension_slice_name = self.get_extension_slice_name(extension_name)
with self._systemd_run_commands_lock:
process = subprocess.Popen( # pylint: disable=W1509
# Some distros (e.g. Ubuntu 20) enable CPU and memory accounting by default, which would create nested cgroups under the extension slice.
# Disabling CPU and memory accounting here prevents those nested cgroups from being created, so that all the counters
# are reported in the extension cgroup itself, since the slice unit file is already configured with accounting enabled.
"systemd-run --property=CPUAccounting=no --property=MemoryAccounting=no --unit={0} --scope --slice={1} {2}".format(scope, extension_slice_name, command),
shell=shell,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
preexec_fn=os.setsid)
# We start systemd-run with shell == True so process.pid is the shell's pid, not the pid for systemd-run
self._systemd_run_commands.append(process.pid)
scope_name = scope + '.scope'
logger.info("Started extension in unit '{0}'", scope_name)
cpu_cgroup = None
try:
cgroup_relative_path = os.path.join('azure.slice/azure-vmextensions.slice', extension_slice_name)
cpu_cgroup_mountpoint, memory_cgroup_mountpoint = self.get_cgroup_mount_points()
if cpu_cgroup_mountpoint is None:
logger.info("The CPU controller is not mounted; will not track resource usage")
else:
cpu_cgroup_path = os.path.join(cpu_cgroup_mountpoint, cgroup_relative_path)
cpu_cgroup = CpuCgroup(extension_name, cpu_cgroup_path)
CGroupsTelemetry.track_cgroup(cpu_cgroup)
if memory_cgroup_mountpoint is None:
logger.info("The Memory controller is not mounted; will not track resource usage")
else:
memory_cgroup_path = os.path.join(memory_cgroup_mountpoint, cgroup_relative_path)
memory_cgroup = MemoryCgroup(extension_name, memory_cgroup_path)
CGroupsTelemetry.track_cgroup(memory_cgroup)
except IOError as e:
if e.errno == 2: # 'No such file or directory'
logger.info("The extension command already completed; will not track resource usage")
logger.info("Failed to start tracking resource usage for the extension: {0}", ustr(e))
except Exception as e:
logger.info("Failed to start tracking resource usage for the extension: {0}", ustr(e))
# Wait for process completion or timeout
try:
return handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout,
stderr=stderr, error_code=error_code, cpu_cgroup=cpu_cgroup)
except ExtensionError as e:
# The extension didn't terminate successfully. Determine whether it was due to systemd errors or
# extension errors.
if not self._is_systemd_failure(scope, stderr):
# There was an extension error; it either timed out or returned a non-zero exit code. Re-raise the error
raise
# There was an issue with systemd-run. We need to log it and retry the extension without systemd.
process_output = read_output(stdout, stderr)
# Reset the stdout and stderr
stdout.truncate(0)
stderr.truncate(0)
if isinstance(e, ExtensionOperationError):
# no-member: Instance of 'ExtensionError' has no 'exit_code' member (no-member) - Disabled: e is actually an ExtensionOperationError
err_msg = 'Systemd process exited with code %s and output %s' % (
e.exit_code, process_output) # pylint: disable=no-member
else:
err_msg = "Systemd timed-out, output: %s" % process_output
raise SystemdRunError(err_msg)
finally:
with self._systemd_run_commands_lock:
self._systemd_run_commands.remove(process.pid)
def cleanup_legacy_cgroups(self):
"""
Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent;
starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. If
we find that any of the legacy groups include the PID of the daemon then we need to disable data collection for this
instance (under systemd, moving PIDs across the cgroup file system can produce unpredictable results)
"""
return CGroupsApi._foreach_legacy_cgroup(lambda *_: None)
|
20ec5234f8ec1d2996516aca08befe39d53bd5d0
|
2fe9ec7915276be05ecf8710a63c6e1b5e79730a
|
/projects/vna/client/vna.py
|
401f8b2162da636e2cb7871d8297bfa800d4674c
|
[
"MIT"
] |
permissive
|
pavel-demin/red-pitaya-notes
|
bd3829ea730d80d34e3dd62a86bbb811ce207190
|
effa44a5c3cc1b4198c6bd32479b2fd791d3358d
|
refs/heads/master
| 2023-08-31T05:03:49.738847
| 2023-08-30T23:28:08
| 2023-08-30T23:28:08
| 28,404,370
| 308
| 206
|
MIT
| 2023-06-25T15:54:24
| 2014-12-23T15:15:00
|
C
|
UTF-8
|
Python
| false
| false
| 37,648
|
py
|
vna.py
|
#!/usr/bin/env python3
import sys
import struct
import warnings
from functools import partial
import numpy as np
import matplotlib
from matplotlib.figure import Figure
from matplotlib.ticker import Formatter, FuncFormatter
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
if "PyQt5" in sys.modules:
from PyQt5.uic import loadUiType
from PyQt5.QtCore import QRegExp, QTimer, QSettings, QDir, Qt
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog, QFileDialog, QPushButton, QLabel, QSpinBox
from PyQt5.QtNetwork import QAbstractSocket, QTcpSocket
else:
from PySide2.QtUiTools import loadUiType
from PySide2.QtCore import QRegExp, QTimer, QSettings, QDir, Qt
from PySide2.QtGui import QRegExpValidator
from PySide2.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog, QFileDialog, QPushButton, QLabel, QSpinBox
from PySide2.QtNetwork import QAbstractSocket, QTcpSocket
Ui_VNA, QMainWindow = loadUiType("vna.ui")
def unicode_minus(s):
return s.replace("-", "\u2212")
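# metric_prefix formats a value with a k/m metric suffix (used for the cursor readouts), e.g.
# metric_prefix(4700.0) -> '4.7k' and metric_prefix(0.0123) -> '12.3m'; values are clamped to +/-99.9k and +/-9.99m.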
def metric_prefix(x, pos=None):
if x == 0.0:
s = "0"
elif abs(x) >= 1.0e5:
if x > 0.0:
s = "99.9k"
else:
s = "-99.9k"
elif abs(x) < 1.0e-2:
if x > 0.0:
s = "9.99m"
else:
s = "-9.99m"
elif abs(x) >= 1.0e3:
s = "%.3gk" % (x * 1.0e-3)
elif abs(x) >= 1.0e0:
s = "%.3g" % x
elif abs(x) >= 1.0e-3:
s = "%.3gm" % (x * 1e3)
else:
s = "%.3g" % x
return unicode_minus(s)
class Measurement:
def __init__(self, start, stop, size):
self.freq = np.linspace(start, stop, size)
self.data = np.zeros(size, np.complex64)
self.period = 62500
class FigureTab:
cursors = [15000, 35000]
colors = ["orange", "violet"]
def __init__(self, layout, vna):
# create figure
self.figure = Figure()
if sys.platform != "win32":
self.figure.set_facecolor("none")
self.canvas = FigureCanvas(self.figure)
layout.addWidget(self.canvas)
# create navigation toolbar
self.toolbar = NavigationToolbar(self.canvas, None, False)
self.toolbar.layout().setSpacing(6)
# remove subplots action
actions = self.toolbar.actions()
if int(matplotlib.__version__[0]) < 2:
self.toolbar.removeAction(actions[7])
else:
self.toolbar.removeAction(actions[6])
self.toolbar.addSeparator()
self.cursorLabels = {}
self.cursorValues = {}
self.cursorMarkers = {}
self.cursorPressed = {}
for i in range(len(self.cursors)):
self.cursorMarkers[i] = None
self.cursorPressed[i] = False
self.cursorLabels[i] = QLabel("Cursor %d, kHz" % (i + 1))
self.cursorLabels[i].setStyleSheet("color: %s" % self.colors[i])
self.cursorValues[i] = QSpinBox()
self.cursorValues[i].setMinimumSize(90, 0)
self.cursorValues[i].setSingleStep(10)
self.cursorValues[i].setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)
self.toolbar.addWidget(self.cursorLabels[i])
self.toolbar.addWidget(self.cursorValues[i])
self.cursorValues[i].valueChanged.connect(partial(self.set_cursor, i))
self.canvas.mpl_connect("button_press_event", partial(self.press_marker, i))
self.canvas.mpl_connect("motion_notify_event", partial(self.move_marker, i))
self.canvas.mpl_connect("button_release_event", partial(self.release_marker, i))
self.toolbar.addSeparator()
self.plotButton = QPushButton("Rescale")
self.toolbar.addWidget(self.plotButton)
layout.addWidget(self.toolbar)
self.plotButton.clicked.connect(self.plot)
self.mode = None
self.vna = vna
def add_cursors(self, axes):
if self.mode == "gain_short" or self.mode == "gain_open":
columns = ["Freq., kHz", "G, dB", r"$\angle$ G, deg"]
else:
columns = ["Freq., kHz", "Re(Z), \u03A9", "Im(Z), \u03A9", "|Z|, \u03A9", r"$\angle$ Z, deg", "SWR", r"|$\Gamma$|", r"$\angle$ $\Gamma$, deg", "RL, dB"]
y = len(self.cursors) * 0.04 + 0.01
for i in range(len(columns)):
self.figure.text(0.19 + 0.1 * i, y, columns[i], horizontalalignment="right")
self.cursorRows = {}
for i in range(len(self.cursors)):
y = len(self.cursors) * 0.04 - 0.03 - 0.04 * i
self.figure.text(0.01, y, "Cursor %d" % (i + 1), color=self.colors[i])
self.cursorRows[i] = {}
for j in range(len(columns)):
self.cursorRows[i][j] = self.figure.text(0.19 + 0.1 * j, y, "", horizontalalignment="right")
if self.mode == "smith":
(self.cursorMarkers[i],) = axes.plot(0.0, 0.0, marker="o", color=self.colors[i])
else:
self.cursorMarkers[i] = axes.axvline(0.0, color=self.colors[i], linewidth=2)
self.set_cursor(i, self.cursorValues[i].value())
def set_cursor(self, index, value):
FigureTab.cursors[index] = value
marker = self.cursorMarkers[index]
if marker is None:
return
row = self.cursorRows[index]
freq = value
gamma = self.vna.gamma(freq)
if self.mode == "smith":
marker.set_xdata(gamma.real)
marker.set_ydata(gamma.imag)
else:
marker.set_xdata(freq)
row[0].set_text("%d" % freq)
if self.mode == "gain_short":
gain = self.vna.gain_short(freq)
magnitude = 20.0 * np.log10(np.absolute(gain))
angle = np.angle(gain, deg=True)
row[1].set_text(unicode_minus("%.2f" % magnitude))
row[2].set_text(unicode_minus("%.1f" % angle))
elif self.mode == "gain_open":
gain = self.vna.gain_open(freq)
magnitude = 20.0 * np.log10(np.absolute(gain))
angle = np.angle(gain, deg=True)
row[1].set_text(unicode_minus("%.2f" % magnitude))
row[2].set_text(unicode_minus("%.1f" % angle))
else:
swr = self.vna.swr(freq)
z = self.vna.impedance(freq)
rl = 20.0 * np.log10(np.absolute(gamma))
if rl > -0.01:
rl = 0.0
row[1].set_text(metric_prefix(z.real))
row[2].set_text(metric_prefix(z.imag))
row[3].set_text(metric_prefix(np.absolute(z)))
angle = np.angle(z, deg=True)
if np.abs(angle) < 0.1:
angle = 0.0
row[4].set_text(unicode_minus("%.1f" % angle))
row[5].set_text(unicode_minus("%.2f" % swr))
row[6].set_text(unicode_minus("%.2f" % np.absolute(gamma)))
angle = np.angle(gamma, deg=True)
if np.abs(angle) < 0.1:
angle = 0.0
row[7].set_text(unicode_minus("%.1f" % angle))
row[8].set_text(unicode_minus("%.2f" % rl))
self.canvas.draw()
def press_marker(self, index, event):
if not event.inaxes:
return
if self.mode == "smith":
return
marker = self.cursorMarkers[index]
if marker is None:
return
contains, misc = marker.contains(event)
if not contains:
return
self.cursorPressed[index] = True
def move_marker(self, index, event):
if not event.inaxes:
return
if self.mode == "smith":
return
if not self.cursorPressed[index]:
return
self.cursorValues[index].setValue(event.xdata)
def release_marker(self, index, event):
self.cursorPressed[index] = False
def xlim(self, freq):
start = freq[0]
stop = freq[-1]
min = np.minimum(start, stop)
max = np.maximum(start, stop)
margin = (max - min) / 50
return (min - margin, max + margin)
def plot(self):
getattr(self, "plot_%s" % self.mode)()
def update(self, mode):
start = self.vna.dut.freq[0]
stop = self.vna.dut.freq[-1]
min = int(np.minimum(start, stop))
max = int(np.maximum(start, stop))
for i in range(len(self.cursors)):
value = self.cursors[i]
self.cursorValues[i].setRange(min, max)
self.cursorValues[i].setValue(value)
value = self.cursorValues[i].value()
self.set_cursor(i, value)
getattr(self, "update_%s" % mode)()
def plot_curves(self, freq, data1, label1, limit1, data2, label2, limit2):
matplotlib.rcdefaults()
matplotlib.rcParams["axes.formatter.use_mathtext"] = True
self.figure.clf()
bottom = len(self.cursors) * 0.04 + 0.13
self.figure.subplots_adjust(left=0.16, bottom=bottom, right=0.84, top=0.96)
axes1 = self.figure.add_subplot(111)
axes1.cla()
axes1.xaxis.grid()
axes1.set_xlabel("kHz")
axes1.set_ylabel(label1)
xlim = self.xlim(freq)
axes1.set_xlim(xlim)
if limit1 is not None:
axes1.set_ylim(limit1)
(self.curve1,) = axes1.plot(freq, data1, color="blue", label=label1)
self.add_cursors(axes1)
if data2 is None:
self.canvas.draw()
return
axes1.tick_params("y", color="blue", labelcolor="blue")
axes1.yaxis.label.set_color("blue")
axes2 = axes1.twinx()
axes2.spines["left"].set_color("blue")
axes2.spines["right"].set_color("red")
axes2.set_ylabel(label2)
axes2.set_xlim(xlim)
if limit2 is not None:
axes2.set_ylim(limit2)
axes2.tick_params("y", color="red", labelcolor="red")
axes2.yaxis.label.set_color("red")
(self.curve2,) = axes2.plot(freq, data2, color="red", label=label2)
self.canvas.draw()
def plot_gain(self, gain):
freq = self.vna.dut.freq
data1 = 20.0 * np.log10(np.absolute(gain))
data2 = np.angle(gain, deg=True)
self.plot_curves(freq, data1, "G, dB", (-110, 110.0), data2, r"$\angle$ G, deg", (-198, 198))
def plot_gain_short(self):
self.mode = "gain_short"
self.plot_gain(self.vna.gain_short(self.vna.dut.freq))
def plot_gain_open(self):
self.mode = "gain_open"
self.plot_gain(self.vna.gain_open(self.vna.dut.freq))
def update_gain(self, gain, mode):
if self.mode == mode:
self.curve1.set_xdata(self.vna.dut.freq)
self.curve1.set_ydata(20.0 * np.log10(np.absolute(gain)))
self.curve2.set_xdata(self.vna.dut.freq)
self.curve2.set_ydata(np.angle(gain, deg=True))
self.canvas.draw()
else:
self.mode = mode
self.plot_gain(gain)
def update_gain_short(self):
self.update_gain(self.vna.gain_short(self.vna.dut.freq), "gain_short")
def update_gain_open(self):
self.update_gain(self.vna.gain_open(self.vna.dut.freq), "gain_open")
def plot_magphase(self, freq, data, label, mode):
self.mode = mode
data1 = np.absolute(data)
data2 = np.angle(data, deg=True)
max = np.fmax(0.01, data1.max())
label1 = r"|%s|" % label
label2 = r"$\angle$ %s, deg" % label
self.plot_curves(freq, data1, label1, (-0.05 * max, 1.05 * max), data2, label2, (-198, 198))
def update_magphase(self, freq, data, label, mode):
if self.mode == mode:
self.curve1.set_xdata(freq)
self.curve1.set_ydata(np.absolute(data))
self.curve2.set_xdata(freq)
self.curve2.set_ydata(np.angle(data, deg=True))
self.canvas.draw()
else:
self.plot_magphase(freq, data, label, mode)
def plot_open(self):
self.plot_magphase(self.vna.open.freq, self.vna.open.data, "open", "open")
def update_open(self):
self.update_magphase(self.vna.open.freq, self.vna.open.data, "open", "open")
def plot_short(self):
self.plot_magphase(self.vna.short.freq, self.vna.short.data, "short", "short")
def update_short(self):
self.update_magphase(self.vna.short.freq, self.vna.short.data, "short", "short")
def plot_load(self):
self.plot_magphase(self.vna.load.freq, self.vna.load.data, "load", "load")
def update_load(self):
self.update_magphase(self.vna.load.freq, self.vna.load.data, "load", "load")
def plot_dut(self):
self.plot_magphase(self.vna.dut.freq, self.vna.dut.data, "dut", "dut")
def update_dut(self):
self.update_magphase(self.vna.dut.freq, self.vna.dut.data, "dut", "dut")
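    # plot_smith_grid draws the 50 ohm Smith-chart grid: constant-resistance and constant-reactance lines
    # z = R + jX are mapped through gamma = (z - 50) / (z + 50) and both halves of each arc are plotted.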
def plot_smith_grid(self, axes, color):
load = 50.0
ticks = np.array([0.0, 0.2, 0.5, 1.0, 2.0, 5.0])
for tick in ticks * load:
axis = np.logspace(-4, np.log10(1.0e3), 200) * load
z = tick + 1.0j * axis
gamma = (z - load) / (z + load)
axes.plot(gamma.real, gamma.imag, color=color, linewidth=0.4, alpha=0.3)
axes.plot(gamma.real, -gamma.imag, color=color, linewidth=0.4, alpha=0.3)
z = axis + 1.0j * tick
gamma = (z - load) / (z + load)
axes.plot(gamma.real, gamma.imag, color=color, linewidth=0.4, alpha=0.3)
axes.plot(gamma.real, -gamma.imag, color=color, linewidth=0.4, alpha=0.3)
if tick == 0.0:
axes.text(1.0, 0.0, "\u221E", color=color, ha="left", va="center", clip_on=True, fontsize="x-large")
axes.text(-1.0, 0.0, "0\u03A9", color=color, ha="left", va="bottom", clip_on=True)
continue
lab = "%d\u03A9" % tick
x = (tick - load) / (tick + load)
axes.text(x, 0.0, lab, color=color, ha="left", va="bottom", clip_on=True)
lab = "j%d\u03A9" % tick
z = 1.0j * tick
gamma = (z - load) / (z + load) * 1.05
x = gamma.real
y = gamma.imag
angle = np.angle(gamma) * 180.0 / np.pi - 90.0
axes.text(x, y, lab, color=color, ha="center", va="center", clip_on=True, rotation=angle)
lab = "\u2212j%d\u03A9" % tick
axes.text(x, -y, lab, color=color, ha="center", va="center", clip_on=True, rotation=-angle)
def plot_smith(self):
self.mode = "smith"
matplotlib.rcdefaults()
self.figure.clf()
bottom = len(self.cursors) * 0.04 + 0.05
self.figure.subplots_adjust(left=0.0, bottom=bottom, right=1.0, top=1.0)
axes1 = self.figure.add_subplot(111)
self.plot_smith_grid(axes1, "blue")
gamma = self.vna.gamma(self.vna.dut.freq)
(self.curve1,) = axes1.plot(gamma.real, gamma.imag, color="red")
axes1.axis("equal")
axes1.set_xlim(-1.12, 1.12)
axes1.set_ylim(-1.12, 1.12)
axes1.xaxis.set_visible(False)
axes1.yaxis.set_visible(False)
for loc, spine in axes1.spines.items():
spine.set_visible(False)
self.add_cursors(axes1)
self.canvas.draw()
def update_smith(self):
if self.mode == "smith":
gamma = self.vna.gamma(self.vna.dut.freq)
self.curve1.set_xdata(gamma.real)
self.curve1.set_ydata(gamma.imag)
self.canvas.draw()
else:
self.plot_smith()
def plot_imp(self):
self.mode = "imp"
freq = self.vna.dut.freq
z = self.vna.impedance(freq)
data1 = np.fmin(9.99e4, np.absolute(z))
data2 = np.angle(z, deg=True)
max = np.fmax(0.01, data1.max())
self.plot_curves(freq, data1, "|Z|, \u03A9", (-0.05 * max, 1.05 * max), data2, r"$\angle$ Z, deg", (-198, 198))
def update_imp(self):
if self.mode == "imp":
freq = self.vna.dut.freq
z = self.vna.impedance(freq)
data1 = np.fmin(9.99e4, np.absolute(z))
data2 = np.angle(z, deg=True)
self.curve1.set_xdata(freq)
self.curve1.set_ydata(data1)
self.curve2.set_xdata(freq)
self.curve2.set_ydata(data2)
self.canvas.draw()
else:
self.plot_imp()
def plot_swr(self):
self.mode = "swr"
freq = self.vna.dut.freq
data1 = self.vna.swr(freq)
self.plot_curves(freq, data1, "SWR", (0.9, 3.1), None, None, None)
def update_swr(self):
if self.mode == "swr":
self.curve1.set_xdata(self.vna.dut.freq)
self.curve1.set_ydata(self.vna.swr(self.vna.dut.freq))
self.canvas.draw()
else:
self.plot_swr()
def plot_gamma(self):
self.plot_magphase(self.vna.dut.freq, self.vna.gamma(self.vna.dut.freq), r"$\Gamma$", "gamma")
def update_gamma(self):
self.update_magphase(self.vna.dut.freq, self.vna.gamma(self.vna.dut.freq), r"$\Gamma$", "gamma")
def plot_rl(self):
self.mode = "rl"
freq = self.vna.dut.freq
gamma = self.vna.gamma(freq)
data1 = 20.0 * np.log10(np.absolute(gamma))
self.plot_curves(freq, data1, "RL, dB", (-105, 5.0), None, None, None)
def update_rl(self):
if self.mode == "rl":
freq = self.vna.dut.freq
gamma = self.vna.gamma(freq)
data1 = 20.0 * np.log10(np.absolute(gamma))
self.curve1.set_xdata(freq)
self.curve1.set_ydata(data1)
self.canvas.draw()
else:
self.plot_rl()
class VNA(QMainWindow, Ui_VNA):
graphs = ["open", "short", "load", "dut", "smith", "imp", "swr", "gamma", "rl", "gain_short", "gain_open"]
def __init__(self):
super(VNA, self).__init__()
self.setupUi(self)
# address validator
rx = QRegExp("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])|rp-[0-9A-Fa-f]{6}\.local$")
self.addrValue.setValidator(QRegExpValidator(rx, self.addrValue))
# state variables
self.idle = True
self.reading = False
self.auto = False
# sweep parameters
self.sweep_start = 10
self.sweep_stop = 50000
self.sweep_size = 5000
# buffer and offset for the incoming samples
self.buffer = bytearray(16 * 32768)
self.offset = 0
self.data = np.frombuffer(self.buffer, np.complex64)
# create measurements
self.open = Measurement(self.sweep_start, self.sweep_stop, self.sweep_size)
self.short = Measurement(self.sweep_start, self.sweep_stop, self.sweep_size)
self.load = Measurement(self.sweep_start, self.sweep_stop, self.sweep_size)
self.dut = Measurement(self.sweep_start, self.sweep_stop, self.sweep_size)
self.mode = "open"
# create figures
self.tabs = {}
for i in range(len(self.graphs)):
layout = getattr(self, "%sLayout" % self.graphs[i])
self.tabs[i] = FigureTab(layout, self)
# configure widgets
self.rateValue.addItems(["5000", "1000", "500", "100", "50", "10", "5", "1"])
self.rateValue.lineEdit().setReadOnly(True)
self.rateValue.lineEdit().setAlignment(Qt.AlignRight)
for i in range(self.rateValue.count()):
self.rateValue.setItemData(i, Qt.AlignRight, Qt.TextAlignmentRole)
self.set_enabled(False)
self.stopSweep.setEnabled(False)
# read settings
settings = QSettings("vna.ini", QSettings.IniFormat)
self.read_cfg_settings(settings)
# create TCP socket
self.socket = QTcpSocket(self)
self.socket.connected.connect(self.connected)
self.socket.readyRead.connect(self.read_data)
self.socket.error.connect(self.display_error)
# connect signals from widgets
self.connectButton.clicked.connect(self.start)
self.writeButton.clicked.connect(self.write_cfg)
self.readButton.clicked.connect(self.read_cfg)
self.openSweep.clicked.connect(partial(self.sweep, "open"))
self.shortSweep.clicked.connect(partial(self.sweep, "short"))
self.loadSweep.clicked.connect(partial(self.sweep, "load"))
self.singleSweep.clicked.connect(partial(self.sweep, "dut"))
self.autoSweep.clicked.connect(self.sweep_auto)
self.stopSweep.clicked.connect(self.cancel)
self.csvButton.clicked.connect(self.write_csv)
self.s1pButton.clicked.connect(self.write_s1p)
self.s2pshortButton.clicked.connect(self.write_s2p_short)
self.s2popenButton.clicked.connect(self.write_s2p_open)
self.startValue.valueChanged.connect(self.set_start)
self.stopValue.valueChanged.connect(self.set_stop)
self.sizeValue.valueChanged.connect(self.set_size)
self.rateValue.currentIndexChanged.connect(self.set_rate)
self.corrValue.valueChanged.connect(self.set_corr)
self.phase1Value.valueChanged.connect(self.set_phase1)
self.phase2Value.valueChanged.connect(self.set_phase2)
self.level1Value.valueChanged.connect(self.set_level1)
self.level2Value.valueChanged.connect(self.set_level2)
self.tabWidget.currentChanged.connect(self.update_tab)
# create timers
self.startTimer = QTimer(self)
self.startTimer.timeout.connect(self.timeout)
self.sweepTimer = QTimer(self)
self.sweepTimer.timeout.connect(self.sweep_timeout)
def set_enabled(self, enabled):
widgets = [
self.rateValue,
self.level1Value,
self.level2Value,
self.corrValue,
self.phase1Value,
self.phase2Value,
self.startValue,
self.stopValue,
self.sizeValue,
self.openSweep,
self.shortSweep,
self.loadSweep,
self.singleSweep,
self.autoSweep,
]
for entry in widgets:
entry.setEnabled(enabled)
def start(self):
if self.idle:
self.connectButton.setEnabled(False)
self.socket.connectToHost(self.addrValue.text(), 1001)
self.startTimer.start(5000)
else:
self.stop()
def stop(self):
self.idle = True
self.cancel()
self.socket.abort()
self.connectButton.setText("Connect")
self.connectButton.setEnabled(True)
self.set_enabled(False)
self.stopSweep.setEnabled(False)
def timeout(self):
self.display_error("timeout")
def connected(self):
self.startTimer.stop()
self.idle = False
self.set_rate(self.rateValue.currentIndex())
self.set_corr(self.corrValue.value())
self.set_phase1(self.phase1Value.value())
self.set_phase2(self.phase2Value.value())
self.set_level1(self.level1Value.value())
self.set_level2(self.level2Value.value())
self.set_gpio(1)
self.connectButton.setText("Disconnect")
self.connectButton.setEnabled(True)
self.set_enabled(True)
self.stopSweep.setEnabled(True)
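    # Each sweep point arrives as 16 bytes (two complex64 samples, ADC1 and ADC2 interleaved), so a full sweep
    # occupies 16 * sweep_size bytes of self.buffer; self.data views the same buffer as complex64, with ADC1 at
    # even indices and ADC2 at odd indices.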
def read_data(self):
while self.socket.bytesAvailable() > 0:
if not self.reading:
self.socket.readAll()
return
size = self.socket.bytesAvailable()
self.progressBar.setValue((self.offset + size) / 16)
limit = 16 * self.sweep_size
if self.offset + size < limit:
self.buffer[self.offset : self.offset + size] = self.socket.read(size)
self.offset += size
else:
self.buffer[self.offset : limit] = self.socket.read(limit - self.offset)
adc1 = self.data[0::2]
adc2 = self.data[1::2]
attr = getattr(self, self.mode)
start = self.sweep_start
stop = self.sweep_stop
size = self.sweep_size
attr.freq = np.linspace(start, stop, size)
attr.data = adc1[0:size].copy()
self.update_tab()
self.reading = False
if not self.auto:
self.progressBar.setValue(0)
self.set_enabled(True)
def display_error(self, socketError):
self.startTimer.stop()
if socketError == "timeout":
QMessageBox.information(self, "VNA", "Error: connection timeout.")
else:
QMessageBox.information(self, "VNA", "Error: %s." % self.socket.errorString())
self.stop()
def set_start(self, value):
self.sweep_start = value
def set_stop(self, value):
self.sweep_stop = value
def set_size(self, value):
self.sweep_size = value
def set_rate(self, value):
if self.idle:
return
rate = [10, 50, 100, 500, 1000, 5000, 10000, 50000][value]
self.socket.write(struct.pack("<I", 3 << 28 | int(rate)))
def set_corr(self, value):
if self.idle:
return
self.socket.write(struct.pack("<I", 4 << 28 | int(value & 0xFFFFFFF)))
def set_phase1(self, value):
if self.idle:
return
self.socket.write(struct.pack("<I", 5 << 28 | int(value)))
def set_phase2(self, value):
if self.idle:
return
self.socket.write(struct.pack("<I", 6 << 28 | int(value)))
def set_level1(self, value):
if self.idle:
return
data = 0 if value == -90 else int(32766 * np.power(10.0, value / 20.0))
self.socket.write(struct.pack("<I", 7 << 28 | int(data)))
def set_level2(self, value):
if self.idle:
return
data = 0 if value == -90 else int(32766 * np.power(10.0, value / 20.0))
self.socket.write(struct.pack("<I", 8 << 28 | int(data)))
def set_gpio(self, value):
if self.idle:
return
self.socket.write(struct.pack("<I", 9 << 28 | int(value)))
def sweep(self, mode):
if self.idle:
return
self.set_enabled(False)
self.mode = mode
self.offset = 0
self.reading = True
self.socket.write(struct.pack("<I", 0 << 28 | int(self.sweep_start * 1000)))
self.socket.write(struct.pack("<I", 1 << 28 | int(self.sweep_stop * 1000)))
self.socket.write(struct.pack("<I", 2 << 28 | int(self.sweep_size)))
self.socket.write(struct.pack("<I", 10 << 28))
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(self.sweep_size)
self.progressBar.setValue(0)
def cancel(self):
self.sweepTimer.stop()
self.auto = False
self.reading = False
self.socket.write(struct.pack("<I", 11 << 28))
self.progressBar.setValue(0)
self.set_enabled(True)
def sweep_auto(self):
self.auto = True
self.sweepTimer.start(100)
def sweep_timeout(self):
if not self.reading:
self.sweep("dut")
def update_tab(self):
index = self.tabWidget.currentIndex()
self.tabs[index].update(self.graphs[index])
def interp(self, freq, meas):
real = np.interp(freq, meas.freq, meas.data.real, period=meas.period)
imag = np.interp(freq, meas.freq, meas.data.imag, period=meas.period)
return real + 1j * imag
def gain_short(self, freq):
short = self.interp(freq, self.short)
dut = self.interp(freq, self.dut)
return np.divide(dut, short)
def gain_open(self, freq):
open = self.interp(freq, self.open)
dut = self.interp(freq, self.dut)
return np.divide(dut, open)
def impedance(self, freq):
open = self.interp(freq, self.open)
short = self.interp(freq, self.short)
load = self.interp(freq, self.load)
dut = self.interp(freq, self.dut)
z = np.divide(50.0 * (open - load) * (dut - short), (load - short) * (open - dut))
z = np.asarray(z)
z.real[z.real < 1.0e-2] = 9.99e-3
return z
def gamma(self, freq):
z = self.impedance(freq)
return np.divide(z - 50.0, z + 50.0)
def swr(self, freq):
magnitude = np.absolute(self.gamma(freq))
swr = np.divide(1.0 + magnitude, 1.0 - magnitude)
return np.clip(swr, 1.0, 99.99)
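# Note added for clarity (not in the original source): impedance() above applies the
# standard one-port open/short/load correction.  Because the raw reading is a bilinear
# function of the true reflection coefficient, the DUT impedance follows from the
# cross-ratio of the four sweeps,
#   Z = 50 * (O - L) * (D - S) / ((L - S) * (O - D)),
# where O, S, L, D are the interpolated open, short, load and DUT readings.
# gamma() then maps Z to the reflection coefficient (Z - 50) / (Z + 50), and swr()
# computes (1 + |gamma|) / (1 - |gamma|), clipped to the range [1, 99.99].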
def write_cfg(self):
dialog = QFileDialog(self, "Write configuration settings", ".", "*.ini")
dialog.setDefaultSuffix("ini")
dialog.selectFile("vna.ini")
dialog.setAcceptMode(QFileDialog.AcceptSave)
dialog.setOptions(QFileDialog.DontConfirmOverwrite)
if dialog.exec() == QDialog.Accepted:
name = dialog.selectedFiles()
settings = QSettings(name[0], QSettings.IniFormat)
self.write_cfg_settings(settings)
def read_cfg(self):
dialog = QFileDialog(self, "Read configuration settings", ".", "*.ini")
dialog.setDefaultSuffix("ini")
dialog.selectFile("vna.ini")
dialog.setAcceptMode(QFileDialog.AcceptOpen)
if dialog.exec() == QDialog.Accepted:
name = dialog.selectedFiles()
settings = QSettings(name[0], QSettings.IniFormat)
self.read_cfg_settings(settings)
self.update_tab()
def write_cfg_settings(self, settings):
settings.setValue("addr", self.addrValue.text())
settings.setValue("rate", self.rateValue.currentIndex())
settings.setValue("corr", self.corrValue.value())
settings.setValue("phase_1", self.phase1Value.value())
settings.setValue("phase_2", self.phase2Value.value())
settings.setValue("level_1", self.level1Value.value())
settings.setValue("level_2", self.level2Value.value())
settings.setValue("open_start", int(self.open.freq[0]))
settings.setValue("open_stop", int(self.open.freq[-1]))
settings.setValue("open_size", self.open.freq.size)
settings.setValue("short_start", int(self.short.freq[0]))
settings.setValue("short_stop", int(self.short.freq[-1]))
settings.setValue("short_size", self.short.freq.size)
settings.setValue("load_start", int(self.load.freq[0]))
settings.setValue("load_stop", int(self.load.freq[-1]))
settings.setValue("load_size", self.load.freq.size)
settings.setValue("dut_start", int(self.dut.freq[0]))
settings.setValue("dut_stop", int(self.dut.freq[-1]))
settings.setValue("dut_size", self.dut.freq.size)
for i in range(len(FigureTab.cursors)):
settings.setValue("cursor_%d" % i, FigureTab.cursors[i])
data = self.open.data
for i in range(self.open.freq.size):
settings.setValue("open_real_%d" % i, float(data.real[i]))
settings.setValue("open_imag_%d" % i, float(data.imag[i]))
data = self.short.data
for i in range(self.short.freq.size):
settings.setValue("short_real_%d" % i, float(data.real[i]))
settings.setValue("short_imag_%d" % i, float(data.imag[i]))
data = self.load.data
for i in range(self.load.freq.size):
settings.setValue("load_real_%d" % i, float(data.real[i]))
settings.setValue("load_imag_%d" % i, float(data.imag[i]))
data = self.dut.data
for i in range(self.dut.freq.size):
settings.setValue("dut_real_%d" % i, float(data.real[i]))
settings.setValue("dut_imag_%d" % i, float(data.imag[i]))
def read_cfg_settings(self, settings):
self.addrValue.setText(settings.value("addr", "192.168.1.100"))
self.rateValue.setCurrentIndex(settings.value("rate", 0, type=int))
self.corrValue.setValue(settings.value("corr", 0, type=int))
self.phase1Value.setValue(settings.value("phase_1", 0, type=int))
self.phase2Value.setValue(settings.value("phase_2", 0, type=int))
self.level1Value.setValue(settings.value("level_1", 0, type=int))
self.level2Value.setValue(settings.value("level_2", -90, type=int))
open_start = settings.value("open_start", 10, type=int)
open_stop = settings.value("open_stop", 50000, type=int)
open_size = settings.value("open_size", 5000, type=int)
short_start = settings.value("short_start", 10, type=int)
short_stop = settings.value("short_stop", 50000, type=int)
short_size = settings.value("short_size", 5000, type=int)
load_start = settings.value("load_start", 10, type=int)
load_stop = settings.value("load_stop", 50000, type=int)
load_size = settings.value("load_size", 5000, type=int)
dut_start = settings.value("dut_start", 10, type=int)
dut_stop = settings.value("dut_stop", 50000, type=int)
dut_size = settings.value("dut_size", 5000, type=int)
self.startValue.setValue(dut_start)
self.stopValue.setValue(dut_stop)
self.sizeValue.setValue(dut_size)
for i in range(len(FigureTab.cursors)):
FigureTab.cursors[i] = settings.value("cursor_%d" % i, FigureTab.cursors[i], type=int)
self.open.freq = np.linspace(open_start, open_stop, open_size)
self.open.data = np.zeros(open_size, np.complex64)
for i in range(open_size):
real = settings.value("open_real_%d" % i, 0.0, type=float)
imag = settings.value("open_imag_%d" % i, 0.0, type=float)
self.open.data[i] = real + 1.0j * imag
self.short.freq = np.linspace(short_start, short_stop, short_size)
self.short.data = np.zeros(short_size, np.complex64)
for i in range(short_size):
real = settings.value("short_real_%d" % i, 0.0, type=float)
imag = settings.value("short_imag_%d" % i, 0.0, type=float)
self.short.data[i] = real + 1.0j * imag
self.load.freq = np.linspace(load_start, load_stop, load_size)
self.load.data = np.zeros(load_size, np.complex64)
for i in range(load_size):
real = settings.value("load_real_%d" % i, 0.0, type=float)
imag = settings.value("load_imag_%d" % i, 0.0, type=float)
self.load.data[i] = real + 1.0j * imag
self.dut.freq = np.linspace(dut_start, dut_stop, dut_size)
self.dut.data = np.zeros(dut_size, np.complex64)
for i in range(dut_size):
real = settings.value("dut_real_%d" % i, 0.0, type=float)
imag = settings.value("dut_imag_%d" % i, 0.0, type=float)
self.dut.data[i] = real + 1.0j * imag
def write_csv(self):
dialog = QFileDialog(self, "Write csv file", ".", "*.csv")
dialog.setDefaultSuffix("csv")
dialog.setAcceptMode(QFileDialog.AcceptSave)
dialog.setOptions(QFileDialog.DontConfirmOverwrite)
if dialog.exec() == QDialog.Accepted:
name = dialog.selectedFiles()
fh = open(name[0], "w")
f = self.dut.freq
o = self.interp(f, self.open)
s = self.interp(f, self.short)
l = self.interp(f, self.load)
d = self.dut.data
fh.write("frequency;open.real;open.imag;short.real;short.imag;load.real;load.imag;dut.real;dut.imag\n")
for i in range(f.size):
fh.write("0.0%.8d;%12.9f;%12.9f;%12.9f;%12.9f;%12.9f;%12.9f;%12.9f;%12.9f\n" % (f[i] * 1000, o.real[i], o.imag[i], s.real[i], s.imag[i], l.real[i], l.imag[i], d.real[i], d.imag[i]))
fh.close()
def write_s1p(self):
dialog = QFileDialog(self, "Write s1p file", ".", "*.s1p")
dialog.setDefaultSuffix("s1p")
dialog.setAcceptMode(QFileDialog.AcceptSave)
dialog.setOptions(QFileDialog.DontConfirmOverwrite)
if dialog.exec() == QDialog.Accepted:
name = dialog.selectedFiles()
fh = open(name[0], "w")
freq = self.dut.freq
gamma = self.gamma(freq)
fh.write("# GHz S MA R 50\n")
for i in range(freq.size):
fh.write("0.0%.8d %8.6f %7.2f\n" % (freq[i] * 1000, np.absolute(gamma[i]), np.angle(gamma[i], deg=True)))
fh.close()
def write_s2p(self, gain):
dialog = QFileDialog(self, "Write s2p file", ".", "*.s2p")
dialog.setDefaultSuffix("s2p")
dialog.setAcceptMode(QFileDialog.AcceptSave)
dialog.setOptions(QFileDialog.DontConfirmOverwrite)
if dialog.exec() == QDialog.Accepted:
name = dialog.selectedFiles()
fh = open(name[0], "w")
freq = self.dut.freq
gamma = self.gamma(freq)
fh.write("# GHz S MA R 50\n")
for i in range(freq.size):
fh.write(
"0.0%.8d %8.6f %7.2f %8.6f %7.2f 0.000000 0.00 0.000000 0.00\n"
% (freq[i] * 1000, np.absolute(gamma[i]), np.angle(gamma[i], deg=True), np.absolute(gain[i]), np.angle(gain[i], deg=True))
)
fh.close()
def write_s2p_short(self):
self.write_s2p(self.gain_short(self.dut.freq))
def write_s2p_open(self):
self.write_s2p(self.gain_open(self.dut.freq))
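# Added illustration (not part of the original program): every control message the GUI
# writes to the socket above is one little-endian 32-bit word with a 4-bit opcode in the
# top bits and a 28-bit payload below it.  The opcode values are read directly off the
# socket.write() calls: 0/1/2 = sweep start/stop/size, 3 = rate, 4 = corr,
# 5/6 = phase 1/2, 7/8 = level 1/2, 9 = GPIO, 10 = start sweep, 11 = cancel.
# The helpers below are a minimal sketch only; they assume `struct` is already imported
# at the top of this script (it is used by the VNA class throughout).
def pack_command(opcode, payload=0):
    """Pack one command word the same way the VNA class does before socket.write()."""
    return struct.pack("<I", (opcode << 28) | (int(payload) & 0xFFFFFFF))
def unpack_command(word):
    """Split a command word back into (opcode, payload); handy when sniffing the link."""
    (raw,) = struct.unpack("<I", word)
    return raw >> 28, raw & 0xFFFFFFF
# For example, the sweep start is sent as pack_command(0, int(sweep_start * 1000)),
# matching the `0 << 28 | int(self.sweep_start * 1000)` write in sweep() above.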
warnings.filterwarnings("ignore")
app = QApplication(sys.argv)
dpi = app.primaryScreen().logicalDotsPerInch()
matplotlib.rcParams["figure.dpi"] = dpi
window = VNA()
window.update_tab()
window.show()
sys.exit(app.exec_())
|
643e2a7ad6800a373d5f3ee4ed8deb5b9ed830bd
|
b728c792b5171f6be6ad91919b4a76a6f198b3e9
|
/src/lib/python/bundy/cc/__init__.py
|
089c914491d8de1c64ceef30b018117407407fb5
|
[
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"BSL-1.0"
] |
permissive
|
bundy-dns/bundy
|
c8beeca2c051924590794c92a3a58d1980a86024
|
3d41934996b82b0cd2fe22dd74d2abc1daba835d
|
refs/heads/master
| 2021-09-28T16:24:39.037808
| 2021-09-22T06:04:17
| 2021-09-22T06:04:17
| 19,160,469
| 110
| 33
|
NOASSERTION
| 2021-09-22T06:04:18
| 2014-04-25T20:54:37
|
C++
|
UTF-8
|
Python
| false
| false
| 55
|
py
|
__init__.py
|
import bundy.cc.message
from bundy.cc.session import *
|
1f2be47b7696ebf704fb300c561767ae4a0187f6
|
67ae6daab10dbefa937a97b542a97569d95fe271
|
/7kyuKatas/Are_the_numbers_in_order.py
|
6941ffc458dd965b59b673b2098d5e06c7182d89
|
[] |
no_license
|
hevalhazalkurt/codewars_python_solutions
|
915536ec857157f21d47ee96c2e01eabad2f4ed7
|
abdb2e63edfe2c70be5df4d56036da73d4a32367
|
refs/heads/master
| 2023-06-07T05:06:08.014121
| 2023-04-08T09:37:02
| 2023-04-08T09:37:02
| 237,953,546
| 167
| 134
| null | 2022-08-27T09:04:49
| 2020-02-03T11:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 54
|
py
|
Are_the_numbers_in_order.py
|
def in_asc_order(arr):
return arr == sorted(arr)
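# A few illustrative checks (not part of the original kata solution): the function
# simply compares the list against its sorted copy.
if __name__ == "__main__":
    assert in_asc_order([1, 2, 4, 7, 19])
    assert in_asc_order([1, 2, 3])
    assert not in_asc_order([1, 6, 10, 18, 2, 4, 10])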
|
1ed311cb8d2521df3dcc1f928c903c2b2ea4dd64
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/citybikes/sensor.py
|
fcd780dba7d7d609c8b98dc44f5c5984f060453a
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 10,382
|
py
|
sensor.py
|
"""Sensor for the CityBikes data."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
import aiohttp
import voluptuous as vol
from homeassistant.components.sensor import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SensorEntity,
)
from homeassistant.const import (
ATTR_ID,
ATTR_LATITUDE,
ATTR_LOCATION,
ATTR_LONGITUDE,
ATTR_NAME,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_RADIUS,
UnitOfLength,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import location
from homeassistant.util.unit_conversion import DistanceConverter
from homeassistant.util.unit_system import US_CUSTOMARY_SYSTEM
_LOGGER = logging.getLogger(__name__)
ATTR_EMPTY_SLOTS = "empty_slots"
ATTR_EXTRA = "extra"
ATTR_FREE_BIKES = "free_bikes"
ATTR_NETWORK = "network"
ATTR_NETWORKS_LIST = "networks"
ATTR_STATIONS_LIST = "stations"
ATTR_TIMESTAMP = "timestamp"
ATTR_UID = "uid"
CONF_NETWORK = "network"
CONF_STATIONS_LIST = "stations"
DEFAULT_ENDPOINT = "https://api.citybik.es/{uri}"
PLATFORM = "citybikes"
MONITORED_NETWORKS = "monitored-networks"
NETWORKS_URI = "v2/networks"
REQUEST_TIMEOUT = 5 # In seconds; argument to asyncio.timeout
SCAN_INTERVAL = timedelta(minutes=5) # Timely, and doesn't suffocate the API
STATIONS_URI = "v2/networks/{uid}?fields=network.stations"
CITYBIKES_ATTRIBUTION = (
"Information provided by the CityBikes Project (https://citybik.es/#about)"
)
CITYBIKES_NETWORKS = "citybikes_networks"
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_RADIUS, CONF_STATIONS_LIST),
PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=""): cv.string,
vol.Optional(CONF_NETWORK): cv.string,
vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude,
vol.Optional(CONF_RADIUS, "station_filter"): cv.positive_int,
vol.Optional(CONF_STATIONS_LIST, "station_filter"): vol.All(
cv.ensure_list, vol.Length(min=1), [cv.string]
),
}
),
)
NETWORK_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ID): cv.string,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_LOCATION): vol.Schema(
{
vol.Required(ATTR_LATITUDE): cv.latitude,
vol.Required(ATTR_LONGITUDE): cv.longitude,
},
extra=vol.REMOVE_EXTRA,
),
},
extra=vol.REMOVE_EXTRA,
)
NETWORKS_RESPONSE_SCHEMA = vol.Schema(
{vol.Required(ATTR_NETWORKS_LIST): [NETWORK_SCHEMA]}
)
STATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_FREE_BIKES): cv.positive_int,
vol.Required(ATTR_EMPTY_SLOTS): vol.Any(cv.positive_int, None),
vol.Required(ATTR_LATITUDE): cv.latitude,
vol.Required(ATTR_LONGITUDE): cv.longitude,
vol.Required(ATTR_ID): cv.string,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_TIMESTAMP): cv.string,
vol.Optional(ATTR_EXTRA): vol.Schema(
{vol.Optional(ATTR_UID): cv.string}, extra=vol.REMOVE_EXTRA
),
},
extra=vol.REMOVE_EXTRA,
)
STATIONS_RESPONSE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_NETWORK): vol.Schema(
{vol.Required(ATTR_STATIONS_LIST): [STATION_SCHEMA]}, extra=vol.REMOVE_EXTRA
)
}
)
class CityBikesRequestError(Exception):
"""Error to indicate a CityBikes API request has failed."""
async def async_citybikes_request(hass, uri, schema):
"""Perform a request to CityBikes API endpoint, and parse the response."""
try:
session = async_get_clientsession(hass)
async with asyncio.timeout(REQUEST_TIMEOUT):
req = await session.get(DEFAULT_ENDPOINT.format(uri=uri))
json_response = await req.json()
return schema(json_response)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Could not connect to CityBikes API endpoint")
except ValueError:
_LOGGER.error("Received non-JSON data from CityBikes API endpoint")
except vol.Invalid as err:
_LOGGER.error("Received unexpected JSON from CityBikes API endpoint: %s", err)
raise CityBikesRequestError
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the CityBikes platform."""
if PLATFORM not in hass.data:
hass.data[PLATFORM] = {MONITORED_NETWORKS: {}}
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
network_id = config.get(CONF_NETWORK)
stations_list = set(config.get(CONF_STATIONS_LIST, []))
radius = config.get(CONF_RADIUS, 0)
name = config[CONF_NAME]
if hass.config.units is US_CUSTOMARY_SYSTEM:
radius = DistanceConverter.convert(
radius, UnitOfLength.FEET, UnitOfLength.METERS
)
# Create a single instance of CityBikesNetworks.
networks = hass.data.setdefault(CITYBIKES_NETWORKS, CityBikesNetworks(hass))
if not network_id:
network_id = await networks.get_closest_network_id(latitude, longitude)
if network_id not in hass.data[PLATFORM][MONITORED_NETWORKS]:
network = CityBikesNetwork(hass, network_id)
hass.data[PLATFORM][MONITORED_NETWORKS][network_id] = network
hass.async_create_task(network.async_refresh())
async_track_time_interval(hass, network.async_refresh, SCAN_INTERVAL)
else:
network = hass.data[PLATFORM][MONITORED_NETWORKS][network_id]
await network.ready.wait()
devices = []
for station in network.stations:
dist = location.distance(
latitude, longitude, station[ATTR_LATITUDE], station[ATTR_LONGITUDE]
)
station_id = station[ATTR_ID]
station_uid = str(station.get(ATTR_EXTRA, {}).get(ATTR_UID, ""))
if radius > dist or stations_list.intersection((station_id, station_uid)):
if name:
uid = "_".join([network.network_id, name, station_id])
else:
uid = "_".join([network.network_id, station_id])
entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, uid, hass=hass)
devices.append(CityBikesStation(network, station_id, entity_id))
async_add_entities(devices, True)
class CityBikesNetworks:
"""Represent all CityBikes networks."""
def __init__(self, hass):
"""Initialize the networks instance."""
self.hass = hass
self.networks = None
self.networks_loading = asyncio.Condition()
async def get_closest_network_id(self, latitude, longitude):
"""Return the id of the network closest to provided location."""
try:
await self.networks_loading.acquire()
if self.networks is None:
networks = await async_citybikes_request(
self.hass, NETWORKS_URI, NETWORKS_RESPONSE_SCHEMA
)
self.networks = networks[ATTR_NETWORKS_LIST]
result = None
minimum_dist = None
for network in self.networks:
network_latitude = network[ATTR_LOCATION][ATTR_LATITUDE]
network_longitude = network[ATTR_LOCATION][ATTR_LONGITUDE]
dist = location.distance(
latitude, longitude, network_latitude, network_longitude
)
if minimum_dist is None or dist < minimum_dist:
minimum_dist = dist
result = network[ATTR_ID]
return result
except CityBikesRequestError as err:
raise PlatformNotReady from err
finally:
self.networks_loading.release()
class CityBikesNetwork:
"""Thin wrapper around a CityBikes network object."""
def __init__(self, hass, network_id):
"""Initialize the network object."""
self.hass = hass
self.network_id = network_id
self.stations = []
self.ready = asyncio.Event()
async def async_refresh(self, now=None):
"""Refresh the state of the network."""
try:
network = await async_citybikes_request(
self.hass,
STATIONS_URI.format(uid=self.network_id),
STATIONS_RESPONSE_SCHEMA,
)
self.stations = network[ATTR_NETWORK][ATTR_STATIONS_LIST]
self.ready.set()
except CityBikesRequestError as err:
if now is not None:
self.ready.clear()
else:
raise PlatformNotReady from err
class CityBikesStation(SensorEntity):
"""CityBikes API Sensor."""
_attr_attribution = CITYBIKES_ATTRIBUTION
_attr_native_unit_of_measurement = "bikes"
_attr_icon = "mdi:bike"
def __init__(self, network, station_id, entity_id):
"""Initialize the sensor."""
self._network = network
self._station_id = station_id
self.entity_id = entity_id
async def async_update(self) -> None:
"""Update station state."""
for station in self._network.stations:
if station[ATTR_ID] == self._station_id:
station_data = station
break
self._attr_name = station_data.get(ATTR_NAME)
self._attr_native_value = station_data.get(ATTR_FREE_BIKES)
self._attr_extra_state_attributes = {
ATTR_UID: station_data.get(ATTR_EXTRA, {}).get(ATTR_UID),
ATTR_LATITUDE: station_data.get(ATTR_LATITUDE),
ATTR_LONGITUDE: station_data.get(ATTR_LONGITUDE),
ATTR_EMPTY_SLOTS: station_data.get(ATTR_EMPTY_SLOTS),
ATTR_TIMESTAMP: station_data.get(ATTR_TIMESTAMP),
}
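# Illustrative YAML configuration accepted by PLATFORM_SCHEMA above (all values are
# placeholders; has_at_least_one_key requires at least one of `radius` or `stations`,
# and latitude/longitude default to the Home Assistant location when omitted):
#
#   sensor:
#     - platform: citybikes
#       name: my_bikes
#       network: some-network-id
#       radius: 500
#       stations:
#         - "123"
#         - "some-station-uid"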
|
d5a1260965ef6734f49c8c0727cd743756ef2857
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/sfr_box/button.py
|
c9418bcc2e9c4e0d8fdafad53ca0c389294eaa89
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,205
|
py
|
button.py
|
"""SFR Box button platform."""
from __future__ import annotations
from collections.abc import Awaitable, Callable, Coroutine
from dataclasses import dataclass
from functools import wraps
from typing import Any, Concatenate, ParamSpec, TypeVar
from sfrbox_api.bridge import SFRBox
from sfrbox_api.exceptions import SFRBoxError
from sfrbox_api.models import SystemInfo
from homeassistant.components.button import (
ButtonDeviceClass,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .models import DomainData
_T = TypeVar("_T")
_P = ParamSpec("_P")
def with_error_wrapping(
func: Callable[Concatenate[SFRBoxButton, _P], Awaitable[_T]]
) -> Callable[Concatenate[SFRBoxButton, _P], Coroutine[Any, Any, _T]]:
"""Catch SFR errors."""
@wraps(func)
async def wrapper(
self: SFRBoxButton,
*args: _P.args,
**kwargs: _P.kwargs,
) -> _T:
"""Catch SFRBoxError errors and raise HomeAssistantError."""
try:
return await func(self, *args, **kwargs)
except SFRBoxError as err:
raise HomeAssistantError(err) from err
return wrapper
@dataclass
class SFRBoxButtonMixin:
"""Mixin for SFR Box buttons."""
async_press: Callable[[SFRBox], Coroutine[None, None, None]]
@dataclass
class SFRBoxButtonEntityDescription(ButtonEntityDescription, SFRBoxButtonMixin):
"""Description for SFR Box buttons."""
BUTTON_TYPES: tuple[SFRBoxButtonEntityDescription, ...] = (
SFRBoxButtonEntityDescription(
async_press=lambda x: x.system_reboot(),
device_class=ButtonDeviceClass.RESTART,
entity_category=EntityCategory.CONFIG,
key="system_reboot",
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the buttons."""
data: DomainData = hass.data[DOMAIN][entry.entry_id]
entities = [
SFRBoxButton(data.box, description, data.system.data)
for description in BUTTON_TYPES
]
async_add_entities(entities)
class SFRBoxButton(ButtonEntity):
"""Mixin for button specific attributes."""
entity_description: SFRBoxButtonEntityDescription
_attr_has_entity_name = True
def __init__(
self,
box: SFRBox,
description: SFRBoxButtonEntityDescription,
system_info: SystemInfo,
) -> None:
"""Initialize the sensor."""
self.entity_description = description
self._box = box
self._attr_unique_id = f"{system_info.mac_addr}_{description.key}"
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, system_info.mac_addr)},
)
@with_error_wrapping
async def async_press(self) -> None:
"""Process the button press."""
await self.entity_description.async_press(self._box)
|
f1dca34fbe381fd1ccbc1a32cedcefa547a47274
|
ed7e61c8eef7fb2213adeb67557d605470c17fb3
|
/medium/property-based-testing/is_mail.py
|
1c74fa82e1d68fc33153597bf7b53112e462d622
|
[] |
no_license
|
MartinThoma/algorithms
|
535840224323822f2ea6b7dd6f82a0fdd22a0ff9
|
a251e9599b685dbf89c891f02d20fefd8538ead5
|
refs/heads/master
| 2023-02-23T17:58:10.913634
| 2023-02-21T05:58:59
| 2023-02-21T05:58:59
| 4,939,076
| 241
| 126
| null | 2023-02-16T05:16:23
| 2012-07-07T16:07:23
|
Python
|
UTF-8
|
Python
| false
| false
| 342
|
py
|
is_mail.py
|
from email.utils import parseaddr
def is_email(potential_email_address: str) -> bool:
context, mail = parseaddr(potential_email_address)
first_condition = len(context) == 0
dot_after_at = (
"@" in potential_email_address and "." in potential_email_address.split("@")[1]
)
return first_condition and dot_after_at
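# Illustrative checks (not part of the original snippet).  parseaddr() splits
# "Display Name <addr>" into (display name, address), so the len(context) == 0 test
# only accepts bare addresses, and the second test requires a dot in the part after "@".
if __name__ == "__main__":
    assert is_email("info@example.com")                 # bare address, dot after "@"
    assert not is_email("Jane Doe <info@example.com>")  # display name present
    assert not is_email("info@localhost")               # no dot after "@"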
|
b7febfe86e2a05a49e1b05d853e774e17bfd4299
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/ktnet/src/reader/data_processor_seq.py
|
3fde15906b9018bda91bf8edb10d1df255d14781
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 12,086
|
py
|
data_processor_seq.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
Dataset reader for preprocessing the SQuAD data and converting it into .bin files.
'''
import io
import os
import argparse
import collections
import six
import numpy as np
from squad_twomemory import DataProcessor as SquadDataProcessor
from tokenization import FullTokenizer
def csv_reader(fd, delimiter='\t'):
"""
load csv file
"""
def gen():
for i in fd:
slots = i.rstrip('\n').split(delimiter)
if len(slots) == 1:
yield (slots,)
else:
yield slots
return gen()
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
text = text
elif isinstance(text, bytes):
text = text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
text = text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
text = text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python 2 or Python 3?")
return text
class BaseReader:
"""BaseReader for classify and sequence labeling task"""
def __init__(self,
vocab_path,
label_map_config=None,
max_seq_len=512,
do_lower_case=True,
in_tokens=False,
random_seed=None):
self.max_seq_len = max_seq_len
self.tokenizer = FullTokenizer(
vocab_file=vocab_path, do_lower_case=do_lower_case)
self.vocab = self.tokenizer.vocab
self.pad_id = self.vocab["[PAD]"]
self.cls_id = self.vocab["[CLS]"]
self.sep_id = self.vocab["[SEP]"]
self.in_tokens = in_tokens
np.random.seed(random_seed)
self.current_example = 0
self.current_epoch = 0
self.num_examples = 0
if label_map_config:
self.label_map = 0
else:
self.label_map = None
def _read_tsv(self, input_file):
"""Reads a tab separated value file."""
with io.open(input_file, "r", encoding="utf8") as f:
reader = csv_reader(f, delimiter="\t")
headers = next(reader)
Example = collections.namedtuple('Example', headers)
examples = []
for line in reader:
example = Example(*line)
examples.append(example)
return examples
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
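# Worked example (added for clarity): with len(tokens_a) == 7, len(tokens_b) == 3 and
# max_length == 8, the loop pops from tokens_a twice (7+3 -> 6+3 -> 5+3) and stops once
# the combined length fits, so the longer sequence absorbs all of the truncation.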
def get_num_examples(self, input_file):
"""return total number of examples"""
examples = self._read_tsv(input_file)
return len(examples)
def get_examples(self, input_file):
examples = self._read_tsv(input_file)
return examples
def get_all_path(self, output_path):
"""
Args:
    output_path: directory in which the converted dataset is saved
Returns:
    the folder paths for input_mask, src_ids, pos_ids, sent_ids,
    wn_concept_ids, nell_concept_ids and unique_id
"""
input_mask_path = os.path.join(output_path, "00_data")
src_ids_path = os.path.join(output_path, "01_data")
pos_ids_path = os.path.join(output_path, "02_data")
sent_ids_path = os.path.join(output_path, "03_data")
wn_concept_ids_path = os.path.join(output_path, "04_data")
nell_concept_ids_path = os.path.join(output_path, "05_data")
unique_id_path = os.path.join(output_path, "06_data")
for path in [input_mask_path, src_ids_path, pos_ids_path, sent_ids_path,
wn_concept_ids_path, nell_concept_ids_path, unique_id_path]:
os.makedirs(path, 0o755, exist_ok=True)
return input_mask_path, src_ids_path, pos_ids_path, sent_ids_path,\
wn_concept_ids_path, nell_concept_ids_path, unique_id_path
def read_concept_embedding(self, embedding_path):
"""read concept embedding"""
fin = open(embedding_path, encoding='utf-8')
info = [line.strip() for line in fin]
dim = len(info[0].split(' ')[1:])
embedding_mat = []
id2concept, concept2id = [], {}
# add padding concept into vocab
id2concept.append('<pad_concept>')
concept2id['<pad_concept>'] = 0
embedding_mat.append([0.0 for _ in range(dim)])
for line in info:
concept_name = line.split(' ')[0]
embedding = [float(value_str) for value_str in line.split(' ')[1:]]
assert len(embedding) == dim and not np.any(np.isnan(embedding))
embedding_mat.append(embedding)
concept2id[concept_name] = len(id2concept)
id2concept.append(concept_name)
fin.close()
return concept2id
def file_based_convert_examples_to_features(self, data_url, output_file):
"""Convert a set of `InputExample`s to .bin files."""
wn_concept2id = self.read_concept_embedding(data_url + "/KB_embeddings/wn_concept2vec.txt")
nell_concept2id = self.read_concept_embedding(data_url + "/KB_embeddings/nell_concept2vec.txt")
processor = SquadDataProcessor(
vocab_path=data_url + "/cased_L-24_H-1024_A-16/vocab.txt",
do_lower_case=False,
max_seq_length=384,
in_tokens=False,
doc_stride=128,
max_query_length=64)
print("squad predict data process begin")
eval_concept_settings = {
'tokenization_path': data_url + '/tokenization_squad/tokens/dev.tokenization.cased.data',
'wn_concept2id': wn_concept2id,
'nell_concept2id': nell_concept2id,
'use_wordnet': True,
'retrieved_synset_path': data_url + "/retrieve_wordnet/output_squad/retrived_synsets.data",
'use_nell': True,
'retrieved_nell_concept_path': data_url + "/retrieve_nell/output_squad/dev.retrieved_nell_concepts.data",
}
eval_data_generator = processor.data_generator(
data_path=data_url + "/SQuAD/dev-v1.1.json",
batch_size=1,
phase='predict',
shuffle=False,
dev_count=1,
epoch=1,
**eval_concept_settings)
output_input_mask, output_src_ids, output_pos_ids, output_sent_ids,\
output_wn_concept_ids, output_nell_concept_ids, output_unique_id = self.get_all_path(output_file)
example_count = 0
for example in eval_data_generator():
src_ids = example[0]
pos_ids = example[1]
sent_ids = example[2]
wn_concept_ids = example[3]
nell_concept_ids = example[4]
input_mask = example[5]
unique_id = example[6]
nell_concept_ids = np.pad(nell_concept_ids, ((0, 0), (0, 0), (0, 3), (0, 0)),
'constant', constant_values=((0, 0), (0, 0), (0, 0), (0, 0)))
file_name = "squad" + "_" + str(example_count) + ".bin"
input_mask_file_path = os.path.join(output_input_mask, file_name)
np.array(input_mask, dtype=np.float32).tofile(input_mask_file_path)
src_ids_file_path = os.path.join(output_src_ids, file_name)
np.array(src_ids, dtype=np.int64).tofile(src_ids_file_path)
pos_ids_file_path = os.path.join(output_pos_ids, file_name)
np.array(pos_ids, dtype=np.int64).tofile(pos_ids_file_path)
sent_ids_file_path = os.path.join(output_sent_ids, file_name)
np.array(sent_ids, dtype=np.int64).tofile(sent_ids_file_path)
wn_concept_ids_file_path = os.path.join(output_wn_concept_ids, file_name)
np.array(wn_concept_ids, dtype=np.int64).tofile(wn_concept_ids_file_path)
nell_concept_ids_file_path = os.path.join(output_nell_concept_ids, file_name)
np.array(nell_concept_ids, dtype=np.int64).tofile(nell_concept_ids_file_path)
unique_id_file_path = os.path.join(output_unique_id, file_name)
np.array(unique_id, dtype=np.int64).tofile(unique_id_file_path)
example_count += 1
if example_count % 3000 == 0:
print(example_count)
print("total example:", example_count)
class ClassifyReader(BaseReader):
"""ClassifyReader"""
def _read_tsv(self, input_file):
"""Reads a tab separated value file."""
with io.open(input_file, "r", encoding="utf8") as f:
reader = csv_reader(f, delimiter="\t")
headers = next(reader)
text_indices = [
index for index, h in enumerate(headers) if h != "label"
]
Example = collections.namedtuple('Example', headers)
examples = []
for line in reader:
for index, text in enumerate(line):
if index in text_indices:
line[index] = text.replace(' ', '')
example = Example(*line)
examples.append(example)
return examples
def main():
parser = argparse.ArgumentParser(description="read dataset and save it to bin")
parser.add_argument("--vocab_file", type=str, default="", help="vocab file")
parser.add_argument("--label_map_config", type=str, default=None, help="label mapping config file")
parser.add_argument("--max_seq_len", type=int, default=64,
help="The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
parser.add_argument("--do_lower_case", type=bool, default=True,
help="Whether to lower case the input text. "
"Should be True for uncased models and False for cased models.")
parser.add_argument("--random_seed", type=int, default=1, help="random seed number")
parser.add_argument("--data_path", type=str, default="../data", help="the format of infer file is tsv.")
parser.add_argument("--output_path", type=str, default="./data", help="the path of convert dataset.")
args_opt = parser.parse_args()
reader = ClassifyReader(
vocab_path=args_opt.vocab_file,
label_map_config=args_opt.label_map_config,
max_seq_len=args_opt.max_seq_len,
do_lower_case=args_opt.do_lower_case,
random_seed=args_opt.random_seed
)
reader.file_based_convert_examples_to_features(data_url=args_opt.data_path, output_file=args_opt.output_path)
if __name__ == "__main__":
main()
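# Example invocation (paths are placeholders; the directory layout is the one referenced
# in file_based_convert_examples_to_features: KB_embeddings/, cased_L-24_H-1024_A-16/vocab.txt,
# tokenization_squad/, retrieve_wordnet/, retrieve_nell/ and SQuAD/dev-v1.1.json under --data_path):
#
#   python data_processor_seq.py \
#       --vocab_file ../data/cased_L-24_H-1024_A-16/vocab.txt \
#       --data_path ../data \
#       --output_path ./data
#
# Each dev example is then written as seven .bin files spread over the
# 00_data ... 06_data folders created by get_all_path().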
|
4283e3d63a4d6b414433ca8a6e4fb34793626151
|
1f399edf85d995443d01f66d77eca0723886d0ff
|
/misc/config_tools/board_inspector/acpiparser/__init__.py
|
d49bee3e3058c1aa5536444a371ed11cfcd90bec
|
[
"BSD-3-Clause"
] |
permissive
|
projectacrn/acrn-hypervisor
|
f9c5864d54929a5d2fa36b5e78c08f19b46b8f98
|
390740aa1b1e9d62c51f8e3afa0c29e07e43fa23
|
refs/heads/master
| 2023-08-18T05:07:01.310327
| 2023-08-11T07:49:36
| 2023-08-16T13:20:27
| 123,983,554
| 1,059
| 686
|
BSD-3-Clause
| 2023-09-14T09:51:10
| 2018-03-05T21:52:25
|
C
|
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
__init__.py
|
# Copyright (C) 2021-2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import sys, os
from acpiparser.apic import APIC
from acpiparser.asf import ASF
from acpiparser.dmar import DMAR
from acpiparser.dsdt import DSDT
from acpiparser.facp import FACP
from acpiparser.rtct import RTCT
from acpiparser.rdt import parse_resource_data
from acpiparser.prt import parse_pci_routing
from acpiparser.tpm2 import TPM2
def parse_table(signature, path=None):
if not path:
path = f"/sys/firmware/acpi/tables/{signature}"
signature = signature.rstrip("!")
fn = getattr(sys.modules[f"acpiparser.{signature.lower()}"], signature)
return fn(path)
def make_parser(signature):
def parse(path=None):
return parse_table(signature, path)
return parse
parse_apic = make_parser('APIC')
parse_asf = make_parser('ASF!')
parse_dsdt = make_parser('DSDT')
parse_dmar = make_parser('DMAR')
parse_facp = make_parser('FACP')
parse_tpm2 = make_parser('TPM2')
def parse_rtct(path=None):
if not path:
path = f"/sys/firmware/acpi/tables/RTCT"
if not os.path.exists(path):
path = f"/sys/firmware/acpi/tables/PTCT"
fn = getattr(sys.modules[f"acpiparser.rtct"], "RTCT")
return fn(path)
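# Minimal usage sketch (not part of the module): each parse_* helper defaults to the
# live firmware table under /sys/firmware/acpi/tables/<SIGNATURE>, while parse_table()
# also accepts an explicit path to a dumped table (the file name below is illustrative).
if __name__ == "__main__":
    apic = parse_apic()                          # reads /sys/firmware/acpi/tables/APIC
    dmar = parse_table("DMAR", "/tmp/DMAR.aml")  # hypothetical dumped table
    print(apic, dmar)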
|
b95e39624649b265985e1619e3f6c7271cd73e07
|
636849fc7edd9dcb095cf3410a121ab37de69f02
|
/SoftLayer/fixtures/SoftLayer_Network_Message_Delivery_Email_Sendgrid.py
|
6a64d1be68445b9e5d155fa7dae42df57cc558dd
|
[
"MIT"
] |
permissive
|
softlayer/softlayer-python
|
bcb09306c3367fdbd2f1407f770c4959729b074c
|
5798373055d9f34dfd531d81638a64d0a7901a13
|
refs/heads/master
| 2023-08-23T19:32:36.990701
| 2023-08-21T03:29:44
| 2023-08-21T03:29:44
| 622,291
| 126
| 182
|
MIT
| 2023-09-14T15:04:48
| 2010-04-21T20:36:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
SoftLayer_Network_Message_Delivery_Email_Sendgrid.py
|
getAccountOverview = {
"creditsAllowed": 25000,
"creditsOverage": 0,
"creditsRemain": 25000,
"creditsUsed": 0,
"package": "Free Package",
"reputation": 100,
"requests": 56
}
getStatistics = [{
"blocks": 0,
"bounces": 0,
"clicks": 0,
"date": "2021-04-28",
"delivered": 0,
"invalidEmail": 0,
"opens": 0,
"repeatBounces": 0,
"repeatSpamReports": 0,
"repeatUnsubscribes": 0,
"requests": 0,
"spamReports": 0,
"uniqueClicks": 0,
"uniqueOpens": 0,
"unsubscribes": 0
}]
getObject = {
"accountId": 123456,
"createDate": "2020-07-06T10:29:11-06:00",
"id": 1232123,
"password": "Test123456789",
"typeId": 21,
"username": "techsupport3@ie.ibm.com",
"vendorId": 1,
"billingItem": {
"categoryCode": "network_message_delivery",
"description": "Free Package",
"id": 695735054,
"notes": "techsupport3@ie.ibm.com",
},
"type": {
"description": "Delivery of messages through e-mail",
"id": 21,
"keyName": "EMAIL",
"name": "Email"
},
"vendor": {
"id": 1,
"keyName": "SENDGRID",
"name": "SendGrid"
},
"emailAddress": "techsupport3@ie.ibm.com",
"smtpAccess": "1"
}
editObject = True
updateEmailAddress = True
|
976813f28ad2b0bdd538512c52ac08e6125fae55
|
45e2f5288afd65cd6b2213117e7df4fdc72847c1
|
/integration_tests/src/main/python/generate_expr_test.py
|
5d7779ea35ef4b4b9bfaa99780e1109687708822
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Zlib",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NVIDIA/spark-rapids
|
49cc2fb633f488dd48337c02b227502fcd473a12
|
5d5b3570eab2c8bb8d77d99613b19197b630a453
|
refs/heads/branch-23.10
| 2023-09-03T19:16:46.422726
| 2023-09-01T13:16:45
| 2023-09-01T13:16:45
| 264,043,501
| 600
| 212
|
Apache-2.0
| 2023-09-13T23:00:22
| 2020-05-14T22:56:44
|
Scala
|
UTF-8
|
Python
| false
| false
| 10,669
|
py
|
generate_expr_test.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect
from data_gen import *
from marks import allow_non_gpu, ignore_order
from pyspark.sql.types import *
import pyspark.sql.functions as f
pytestmark = pytest.mark.nightly_resource_consuming_test
explode_gens = all_gen + [binary_gen]
arrays_with_binary = [ArrayGen(BinaryGen(max_length=5))]
maps_with_binary = [MapGen(IntegerGen(nullable=False), BinaryGen(max_length=5))]
def four_op_df(spark, gen, length=2048, seed=0):
return gen_df(spark, StructGen([
('a', gen),
('b', gen),
('c', gen),
('d', gen)], nullable=False), length=length, seed=seed)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_explode_makearray(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).selectExpr('a', 'explode(array(b, c, d))'))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_explode_litarray(data_gen):
array_lit = gen_scalar(ArrayGen(data_gen, min_length=3, max_length=3, nullable=False))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).select(f.col('a'), f.col('b'), f.col('c'),
f.explode(array_lit)))
# Use a small `spark.rapids.sql.batchSizeBytes` so that input batches are forced to be split up during explode
conf_to_enforce_split_input = {'spark.rapids.sql.batchSizeBytes': '8192'}
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', explode_gens + struct_gens_sample_with_decimal128 +
array_gens_sample + map_gens_sample + arrays_with_binary + maps_with_binary,
ids=idfn)
def test_explode_array_data(data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample + decimal_128_map_gens + maps_with_binary, ids=idfn)
def test_explode_map_data(map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_explode_nested_array_data(data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'explode(b) as c').selectExpr('a', 'explode(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', explode_gens + struct_gens_sample_with_decimal128 +
array_gens_sample + arrays_with_binary + map_gens_sample + maps_with_binary,
ids=idfn)
def test_explode_outer_array_data(data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample + decimal_128_map_gens + maps_with_binary, ids=idfn)
def test_explode_outer_map_data(map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_explode_outer_nested_array_data(data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'explode_outer(b) as c').selectExpr('a', 'explode_outer(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_posexplode_makearray(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).selectExpr('posexplode(array(b, c, d))', 'a'))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_posexplode_litarray(data_gen):
array_lit = gen_scalar(ArrayGen(data_gen, min_length=3, max_length=3, nullable=False))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).select(f.col('a'), f.col('b'), f.col('c'),
f.posexplode(array_lit)))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', explode_gens + struct_gens_sample_with_decimal128 +
array_gens_sample + arrays_with_binary + map_gens_sample + maps_with_binary,
ids=idfn)
def test_posexplode_array_data(data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample + decimal_128_map_gens + maps_with_binary, ids=idfn)
def test_posexplode_map_data(map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_posexplode_nested_array_data(data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'posexplode(b) as (pos, c)').selectExpr('a', 'pos', 'posexplode(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', explode_gens + struct_gens_sample_with_decimal128 +
array_gens_sample + arrays_with_binary + map_gens_sample + maps_with_binary,
ids=idfn)
def test_posexplode_outer_array_data(data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample + decimal_128_map_gens + maps_with_binary, ids=idfn)
def test_posexplode_outer_map_data(map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_posexplode_nested_outer_array_data(data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'posexplode_outer(b) as (pos, c)').selectExpr(
'a', 'pos', 'posexplode_outer(c)'),
conf=conf_to_enforce_split_input)
@allow_non_gpu("GenerateExec", "ShuffleExchangeExec")
@ignore_order(local=True)
def test_generate_outer_fallback():
assert_gpu_fallback_collect(
lambda spark: spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as x")\
.repartition(1).selectExpr("inline_outer(x)"),
"GenerateExec")
|
95a310f3d5e2801c4b5f9fb52214c65abf99122e
|
21e56f433a800c6f3c1e7b79b04f0a27ce788a51
|
/examples/celeryconfig.py
|
fd2c7a779436b76ec65e7469f903b0f9526dba02
|
[
"BSD-3-Clause"
] |
permissive
|
mher/flower
|
01932e724d945650401c6482856f727f46d1d254
|
066cf497f60e1fb89b439504bd5e4e6cfec8f044
|
refs/heads/master
| 2023-09-03T10:00:03.702448
| 2023-07-04T14:13:43
| 2023-07-04T14:13:43
| 4,946,406
| 5,213
| 1,126
|
NOASSERTION
| 2023-09-13T01:31:53
| 2012-07-08T13:35:09
|
Python
|
UTF-8
|
Python
| false
| false
| 120
|
py
|
celeryconfig.py
|
broker_url = 'redis://localhost:6379/0'
celery_result_backend = 'redis://localhost:6379/0'
task_send_sent_event = False
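# Usage sketch (assumes Celery is installed; not part of the config itself): point a
# Celery app at this module so the workers and Flower share the same broker settings, e.g.
#
#   from celery import Celery
#   app = Celery("tasks")
#   app.config_from_object("celeryconfig")
#
# and then start the dashboard with:  celery --app=tasks flower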
|
450e4f1b0b9f06e4c9051d9faa888aea1d0bd114
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/QuickChart__examples/to_file.py
|
e2add1ba90d7700366204693452f820ec88ca7bd
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 246
|
py
|
to_file.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
from pathlib import Path
from common import get_chart
PATH = Path(__file__).resolve().absolute()
FILE_NAME = Path(f"{PATH}.png")
qc = get_chart()
qc.to_file(FILE_NAME)
|
76fca89a69976b7751322a05fd17a28b451e1844
|
8d77f3b72dc52b85ee0c4ef6ba06f63a6920841f
|
/fx2ait/fx2ait/test/test_tensor_spec.py
|
4ab33f2184233e036b64071a55f499049588257b
|
[
"Apache-2.0"
] |
permissive
|
facebookincubator/AITemplate
|
b643c217e1d15f7f17dab1eb1cc6855eab664b97
|
c60dc19788217556ba12ea378c02b9fd0aea9ffe
|
refs/heads/main
| 2023-08-28T18:22:15.828008
| 2023-08-28T14:43:41
| 2023-08-28T14:43:41
| 514,321,895
| 4,065
| 334
|
Apache-2.0
| 2023-09-14T04:53:57
| 2022-07-15T15:40:58
|
Python
|
UTF-8
|
Python
| false
| false
| 6,135
|
py
|
test_tensor_spec.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import torch
from aitemplate.compiler.public import IntImm, IntVar
from fx2ait.tensor_spec import TensorSpec
from parameterized import parameterized
class TestTensorSpec(unittest.TestCase):
def test_two_input_lists(self):
inputs1 = [
torch.empty([1, 3, 4], dtype=torch.float16),
torch.empty([5, 6], dtype=torch.int32),
[
torch.empty([7, 128, 9], dtype=torch.float16),
torch.empty([1, 16], dtype=torch.float16),
],
]
inputs2 = [
torch.empty([32, 3, 4], dtype=torch.float16),
torch.empty([5, 6], dtype=torch.int32),
[
torch.empty([7, 1, 9], dtype=torch.float16),
torch.empty([32, 16], dtype=torch.float16),
],
]
specs = TensorSpec.from_two_input_lists(inputs1, inputs2)
self.assertEqual(3, len(specs))
self.assertEqual(2, len(specs[2]))
self.assertEqual(
TensorSpec(
[IntVar([1, 32], "dynamic_dim_0"), IntImm(3), IntImm(4)], torch.float16
),
specs[0],
)
self.assertEqual(TensorSpec([IntImm(5), IntImm(6)], torch.int32), specs[1])
self.assertEqual(
TensorSpec(
[IntImm(7), IntVar([1, 128], "dynamic_dim_1"), IntImm(9)], torch.float16
),
specs[2][0],
)
self.assertEqual(
TensorSpec([IntVar([1, 32], "dynamic_dim_0"), IntImm(16)], torch.float16),
specs[2][1],
)
@parameterized.expand(
[
("single", [([10, 3, 4], torch.float16)]),
(
"multi",
[
([10, 3, 4], torch.float16),
([10, 6], torch.int32),
([10, 8, 9], torch.float16),
],
),
(
"different_bs_dim",
[
([10, 3, 4], torch.float16),
([10, 6], torch.int32),
([4, 10, 9], torch.float16),
],
),
(
"same_shapes",
[
([10, 3, 40, 5], torch.float16),
([10, 3, 40, 5], torch.float16),
([10, 3, 40, 5], torch.float32),
],
),
(
"leftmost_bs_dim",
[
([10, 20, 30], torch.float16),
([10, 30, 20], torch.float16),
([20, 10, 30], torch.float32),
],
),
]
)
def test_input_list_with_batch_size(self, _, settings):
inputs = [torch.empty(setting[0], dtype=setting[1]) for setting in settings]
# The test cases use a default batch_size of 10; avoid giving any other shape parameter this value
batch_size = 10
specs = TensorSpec.from_input_list_with_batch_size(inputs, 32)
self.assertEqual(len(settings), len(specs))
for index, setting in enumerate(settings):
expected_shape = setting[0]
expected_spec = []
for shape in expected_shape:
if shape == batch_size:
expected_spec.append(IntVar([1, 32], "batch_size"))
else:
expected_spec.append(IntImm(shape))
self.assertEqual(
TensorSpec(expected_spec, setting[1]),
specs[index],
)
def test_input_list_with_batch_size_non_default_dim(self):
inputs = [
torch.empty([2, 10, 4], dtype=torch.float16),
torch.empty([5, 10], dtype=torch.int32),
torch.empty([7, 10, 9], dtype=torch.float16),
]
specs = TensorSpec.from_input_list_with_batch_size(inputs, 32, 1)
self.assertEqual(3, len(specs))
self.assertEqual(
TensorSpec(
[IntImm(2), IntVar([1, 32], "batch_size"), IntImm(4)], torch.float16
),
specs[0],
)
self.assertEqual(
TensorSpec([IntImm(5), IntVar([1, 32], "batch_size")], torch.int32),
specs[1],
)
self.assertEqual(
TensorSpec(
[IntImm(7), IntVar([1, 32], "batch_size"), IntImm(9)], torch.float16
),
specs[2],
)
def test_input_with_no_bs_tensor(self):
inputs = [
torch.empty([2, 10, 4], dtype=torch.float16),
torch.empty([20], dtype=torch.int32),
torch.empty([7, 10, 9], dtype=torch.float16),
torch.empty([20, 7, 10, 9], dtype=torch.float16),
]
specs = TensorSpec.from_input_list_with_batch_size(inputs, 32, 1)
self.assertEqual(4, len(specs))
self.assertEqual(
TensorSpec(
[IntImm(2), IntVar([1, 32], "batch_size"), IntImm(4)], torch.float16
),
specs[0],
)
self.assertEqual(
TensorSpec([IntImm(20)], torch.int32),
specs[1],
)
self.assertEqual(
TensorSpec(
[IntImm(7), IntVar([1, 32], "batch_size"), IntImm(9)], torch.float16
),
specs[2],
)
self.assertEqual(
TensorSpec(
[IntImm(20), IntImm(7), IntVar([1, 32], "batch_size"), IntImm(9)],
torch.float16,
),
specs[3],
)
|
973aa0202ad731f941a6c51cd5d182d3ec9618bc
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/dbshell/fake_client.py
|
70451f302a73ad31961fa26c06b2569898d2f01a
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 24
|
py
|
fake_client.py
|
import sys
sys.exit(1)
|